InternalCallVerifier

Verifies the values of objects and variables involved in calls to the application under test (AUT).


Class: org.apache.hadoop.hbase.IntegrationTestLazyCfLoading

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a multi-threaded writer against the table and repeatedly scans with
 * lazy column-family loading enabled, verifying every scanned row against the
 * data generator until the writer finishes or the configured timeout expires.
 *
 * Fix: the original leaked the Connection, Table and each ResultScanner when an
 * assertion failed before the explicit close(); resources are now managed with
 * try-with-resources so they close on every exit path.
 */
@Test
public void testReadersAndWriters() throws Exception {
  Configuration conf = util.getConfiguration();
  String timeoutKey = String.format(TIMEOUT_KEY, this.getClass().getSimpleName());
  long maxRuntime = conf.getLong(timeoutKey, DEFAULT_TIMEOUT_MINUTES);
  long serverCount = util.getHBaseClusterInterface().getClusterStatus().getServersSize();
  long keysToWrite = serverCount * KEYS_TO_WRITE_PER_SERVER;
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TABLE_NAME)) {
    MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, TABLE_NAME);
    writer.setMultiPut(true);
    LOG.info("Starting writer; the number of keys to write is " + keysToWrite);
    writer.start(1, keysToWrite, WRITER_THREADS);
    long now = EnvironmentEdgeManager.currentTime();
    long timeLimit = now + (maxRuntime * 60000);
    boolean isWriterDone = false;
    while (now < timeLimit && !isWriterDone) {
      LOG.info("Starting the scan; wrote approximately "
          + dataGen.getTotalNumberOfKeys() + " keys");
      // Sample writer-done BEFORE scanning: if it was done before the scan
      // started, the scan must see every generated key.
      isWriterDone = writer.isDone();
      if (isWriterDone) {
        LOG.info("Scanning full result, writer is done");
      }
      Scan scan = new Scan();
      for (byte[] cf : dataGen.getColumnFamilies()) {
        scan.addFamily(cf);
      }
      scan.setFilter(dataGen.getScanFilter());
      scan.setLoadColumnFamiliesOnDemand(true);
      long onesGennedBeforeScan = dataGen.getExpectedNumberOfKeys();
      long startTs = EnvironmentEdgeManager.currentTime();
      long resultCount = 0;
      // Close each scanner; the original never closed its ResultScanner.
      try (ResultScanner results = table.getScanner(scan)) {
        Result result;
        while ((result = results.next()) != null) {
          boolean isOk = writer.verifyResultAgainstDataGenerator(result, true, true);
          Assert.assertTrue("Failed to verify [" + Bytes.toString(result.getRow()) + "]", isOk);
          ++resultCount;
        }
      }
      long timeTaken = EnvironmentEdgeManager.currentTime() - startTs;
      long onesGennedAfterScan = dataGen.getExpectedNumberOfKeys();
      Assert.assertTrue("Read " + resultCount + " keys when at most " + onesGennedAfterScan
          + " were generated ", onesGennedAfterScan >= resultCount);
      if (isWriterDone) {
        Assert.assertTrue("Read " + resultCount + " keys; the writer is done and "
            + onesGennedAfterScan + " keys were generated", onesGennedAfterScan == resultCount);
      } else if (onesGennedBeforeScan * 0.9 > resultCount) {
        // Not fatal: the writer may simply be slower than the scanner.
        LOG.warn("Read way too few keys (" + resultCount + "/" + onesGennedBeforeScan
            + ") - there might be a problem, or the writer might just be slow");
      }
      LOG.info("Scan took " + timeTaken + "ms");
      if (!isWriterDone) {
        Thread.sleep(WAIT_BETWEEN_SCANS_MS);
        now = EnvironmentEdgeManager.currentTime();
      }
    }
    Assert.assertEquals("There are write failures", 0, writer.getNumWriteFailures());
    Assert.assertTrue("Writer is not done", isWriterDone);
  }
}

Class: org.apache.hadoop.hbase.IntegrationTestManyRegions

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Creates the table on a worker thread and fails the test if the worker does
 * not signal completion within TIMEOUT_MINUTES, or if creation itself failed.
 */
@Test
public void testCreateTableWithRegions() throws Exception {
  final CountDownLatch latch = new CountDownLatch(1);
  final Worker tableCreator = new Worker(latch, util.getHBaseAdmin());
  final Thread creatorThread = new Thread(tableCreator);
  LOG.info("Launching worker thread to create the table.");
  creatorThread.start();
  final boolean finishedInTime = latch.await(TIMEOUT_MINUTES, TimeUnit.MINUTES);
  if (!finishedInTime) {
    // Stop the stuck worker before failing so it does not outlive the test.
    creatorThread.interrupt();
    fail("Timeout limit expired.");
  }
  assertTrue("Table creation failed.", tableCreator.isSuccess());
}

Class: org.apache.hadoop.hbase.TestCellComparator

InternalCallVerifier BooleanVerifier 
/**
 * Exercises comparator.compare(Cell, byte[] key, offset, length) against flat
 * serialized keys for each ordering dimension: row, family, timestamp, type.
 *
 * Fix: lowercase long suffix '1l' (easily misread as the number 11) replaced
 * with the conventional uppercase '1L'.
 */
@Test
public void testCompareCellWithKey() throws Exception {
  // Rows differ: row1 sorts before row2.
  KeyValue kv1 = new KeyValue(row1, fam1, qual1, val);
  KeyValue kv2 = new KeyValue(row2, fam1, qual1, val);
  assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) < 0);
  // Families differ: fam2 sorts after fam1.
  kv1 = new KeyValue(row1, fam2, qual1, val);
  kv2 = new KeyValue(row1, fam1, qual1, val);
  assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
  // Timestamps differ: newer timestamps sort first, so ts=1 compares after ts=2.
  kv1 = new KeyValue(row1, fam1, qual1, 1L, val);
  kv2 = new KeyValue(row1, fam1, qual1, 2L, val);
  assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
  // Types differ at the same timestamp: Maximum sorts before Put.
  kv1 = new KeyValue(row1, fam1, qual1, 1L, Type.Put);
  kv2 = new KeyValue(row1, fam1, qual1, 1L, Type.Maximum);
  assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) > 0);
  // Identical keys compare equal.
  kv1 = new KeyValue(row1, fam1, qual1, 1L, Type.Put);
  kv2 = new KeyValue(row1, fam1, qual1, 1L, Type.Put);
  assertTrue((comparator.compare(kv1, kv2.getKey(), 0, kv2.getKey().length)) == 0);
}

InternalCallVerifier BooleanVerifier 
/**
 * Exercises cell-to-cell comparison for each ordering dimension: row, family,
 * timestamp and type, plus full-cell equality.
 *
 * Fix: lowercase long suffix '1l' replaced with the conventional '1L'.
 */
@Test
public void testCompareCells() {
  // Rows differ: row1 sorts before row2.
  KeyValue kv1 = new KeyValue(row1, fam1, qual1, val);
  KeyValue kv2 = new KeyValue(row2, fam1, qual1, val);
  assertTrue((comparator.compare(kv1, kv2)) < 0);
  // Families differ: fam2 sorts after fam1.
  kv1 = new KeyValue(row1, fam2, qual1, val);
  kv2 = new KeyValue(row1, fam1, qual1, val);
  assertTrue((CellComparator.compareFamilies(kv1, kv2) > 0));
  // Timestamps differ: newer timestamps sort first, so ts=1 compares after ts=2.
  kv1 = new KeyValue(row1, fam1, qual1, 1L, val);
  kv2 = new KeyValue(row1, fam1, qual1, 2L, val);
  assertTrue((comparator.compare(kv1, kv2) > 0));
  // Types differ at the same timestamp: Maximum sorts before Put.
  kv1 = new KeyValue(row1, fam1, qual1, 1L, Type.Put);
  kv2 = new KeyValue(row1, fam1, qual1, 1L, Type.Maximum);
  assertTrue((comparator.compare(kv1, kv2) > 0));
  // Fully identical cells are equal.
  kv1 = new KeyValue(row1, fam1, qual1, 1L, Type.Put);
  kv2 = new KeyValue(row1, fam1, qual1, 1L, Type.Put);
  assertTrue((CellUtil.equals(kv1, kv2)));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that buffer-backed cells compare consistently with each other and
 * with array-backed KeyValues on columns, families, qualifiers and rows.
 */
@Test
public void testCompareByteBufferedCell() {
  final byte[] rowA = Bytes.toBytes("row1");
  final byte[] rowB = Bytes.toBytes("row2");
  final byte[] family = Bytes.toBytes("cf1");
  final byte[] qualA = Bytes.toBytes("qual1");
  final byte[] qualB = Bytes.toBytes("qual2");
  final byte[] value = Bytes.toBytes("val1");

  KeyValue kv = new KeyValue(rowA, family, qualA, value);
  ByteBuffer wrapped = ByteBuffer.wrap(kv.getBuffer());
  final Cell bbCell1 = new ByteBufferedCellImpl(wrapped, 0, wrapped.remaining());

  kv = new KeyValue(rowB, family, qualA, value);
  wrapped = ByteBuffer.wrap(kv.getBuffer());
  final Cell bbCell2 = new ByteBufferedCellImpl(wrapped, 0, wrapped.remaining());

  // Same family+qualifier regardless of the cell's backing storage.
  assertEquals(0, CellComparator.compareColumns(bbCell1, bbCell2));
  assertEquals(0, CellComparator.compareColumns(bbCell1, kv));

  kv = new KeyValue(rowB, family, qualB, value);
  wrapped = ByteBuffer.wrap(kv.getBuffer());
  final Cell bbCell3 = new ByteBufferedCellImpl(wrapped, 0, wrapped.remaining());

  // Families equal, qualifiers (and hence columns) diverge.
  assertEquals(0, CellComparator.compareFamilies(bbCell2, bbCell3));
  assertTrue(CellComparator.compareQualifiers(bbCell2, bbCell3) < 0);
  assertTrue(CellComparator.compareColumns(bbCell2, bbCell3) < 0);
  // Row ordering: equal rows, then row1 < row2.
  assertEquals(0, CellComparator.COMPARATOR.compareRows(bbCell2, bbCell3));
  assertTrue(CellComparator.COMPARATOR.compareRows(bbCell1, bbCell2) < 0);
}

Class: org.apache.hadoop.hbase.TestCellUtil

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Checks both forms of CellUtil.toString: the non-verbose form ends with the
 * value length and sequence id, while the verbose form additionally appends
 * the value itself.
 */
@Test
public void testToString1() {
  final String row = "test.row";
  final String family = "test.family";
  final String qualifier = "test.qualifier";
  final long timestamp = 42;
  final Type type = Type.Put;
  final String value = "test.value";
  final long seqId = 1042;
  final Cell cell = CellUtil.createCell(Bytes.toBytes(row), Bytes.toBytes(family),
      Bytes.toBytes(qualifier), timestamp, type.getCode(), Bytes.toBytes(value), seqId);
  final String nonVerbose = CellUtil.toString(cell, false);
  final String verbose = CellUtil.toString(cell, true);
  System.out.println("nonVerbose=" + nonVerbose);
  System.out.println("verbose=" + verbose);
  Assert.assertEquals(
      String.format("%s/%s:%s/%d/%s/vlen=%s/seqid=%s", row, family, qualifier,
          timestamp, type.toString(), Bytes.toBytes(value).length, seqId),
      nonVerbose);
  Assert.assertEquals(
      String.format("%s/%s:%s/%d/%s/vlen=%s/seqid=%s/%s", row, family, qualifier,
          timestamp, type.toString(), Bytes.toBytes(value).length, seqId, value),
      verbose);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier 
/**
 * Verifies that cloning each field (row, family, qualifier, value, tags) out
 * of a buffer-backed cell round-trips the original bytes.
 */
@Test
public void testCloneCellFieldsFromByteBufferedCell() {
  final byte[] row = Bytes.toBytes("row1");
  final byte[] family = Bytes.toBytes("cf1");
  final byte[] qualifier = Bytes.toBytes("qual1");
  final byte[] value = Bytes.toBytes("val1");
  final byte[] tags = Bytes.toBytes("tag1");
  final KeyValue kv = new KeyValue(row, family, qualifier, 0, qualifier.length, 1234L,
      Type.Put, value, 0, value.length, tags);
  final ByteBuffer wrapped = ByteBuffer.wrap(kv.getBuffer());
  final Cell bbCell = new ByteBufferedCellImpl(wrapped, 0, wrapped.remaining());

  assertTrue(Bytes.equals(row, CellUtil.cloneRow(bbCell)));
  assertTrue(Bytes.equals(family, CellUtil.cloneFamily(bbCell)));
  assertTrue(Bytes.equals(qualifier, CellUtil.cloneQualifier(bbCell)));
  assertTrue(Bytes.equals(value, CellUtil.cloneValue(bbCell)));
  // Tags use a copy-to-destination API rather than a clone.
  final byte[] tagDest = new byte[tags.length];
  CellUtil.copyTagTo(bbCell, tagDest, 0);
  assertTrue(Bytes.equals(tags, tagDest));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies the typed accessors (int row, long/double/BigDecimal value) on a
 * buffer-backed cell whose bytes were produced by Bytes.toBytes.
 */
@Test
public void testCellFieldsAsPrimitiveTypesFromByteBufferedCell() {
  final int rowInt = 123;
  final byte[] row = Bytes.toBytes(rowInt);
  final byte[] family = Bytes.toBytes("cf1");
  final byte[] qualifier = Bytes.toBytes("qual1");

  // long-encoded value
  final long longValue = 10981L;
  KeyValue kv = new KeyValue(row, family, qualifier, Bytes.toBytes(longValue));
  ByteBuffer wrapped = ByteBuffer.wrap(kv.getBuffer());
  Cell bbCell = new ByteBufferedCellImpl(wrapped, 0, wrapped.remaining());
  assertEquals(rowInt, CellUtil.getRowAsInt(bbCell));
  assertEquals(longValue, CellUtil.getValueAsLong(bbCell));

  // double-encoded value
  final double doubleValue = 3005.5;
  kv = new KeyValue(row, family, qualifier, Bytes.toBytes(doubleValue));
  wrapped = ByteBuffer.wrap(kv.getBuffer());
  bbCell = new ByteBufferedCellImpl(wrapped, 0, wrapped.remaining());
  assertEquals(doubleValue, CellUtil.getValueAsDouble(bbCell), 0.0);

  // BigDecimal-encoded value
  final BigDecimal decimalValue = new BigDecimal(9999);
  kv = new KeyValue(row, family, qualifier, Bytes.toBytes(decimalValue));
  wrapped = ByteBuffer.wrap(kv.getBuffer());
  bbCell = new ByteBufferedCellImpl(wrapped, 0, wrapped.remaining());
  assertEquals(decimalValue, CellUtil.getValueAsBigDecimal(bbCell));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier 
/**
 * Verifies the CellUtil.matching* predicates across two buffer-backed cells
 * that differ only in qualifier, and between buffer-backed and array-backed
 * cells with identical fields.
 */
@Test
public void testMatchingCellFieldsFromByteBufferedCell() {
  final byte[] row = Bytes.toBytes("row1");
  final byte[] family = Bytes.toBytes("cf1");
  final byte[] qualA = Bytes.toBytes("qual1");
  final byte[] qualB = Bytes.toBytes("qual2");
  final byte[] value = Bytes.toBytes("val1");
  final byte[] tags = Bytes.toBytes("tag1");

  KeyValue kv = new KeyValue(row, family, qualA, 0, qualA.length, 1234L, Type.Put,
      value, 0, value.length, tags);
  ByteBuffer wrapped = ByteBuffer.wrap(kv.getBuffer());
  final Cell bbCell1 = new ByteBufferedCellImpl(wrapped, 0, wrapped.remaining());

  // kv is reassigned: from here on it is the qualB cell, matching bbCell2.
  kv = new KeyValue(row, family, qualB, 0, qualB.length, 1234L, Type.Put,
      value, 0, value.length, tags);
  wrapped = ByteBuffer.wrap(kv.getBuffer());
  final Cell bbCell2 = new ByteBufferedCellImpl(wrapped, 0, wrapped.remaining());

  // Rows match everywhere.
  assertTrue(CellUtil.matchingRows(bbCell1, bbCell2));
  assertTrue(CellUtil.matchingRows(kv, bbCell2));
  assertTrue(CellUtil.matchingRow(bbCell1, row));
  // Families match everywhere.
  assertTrue(CellUtil.matchingFamily(bbCell1, bbCell2));
  assertTrue(CellUtil.matchingFamily(kv, bbCell2));
  assertTrue(CellUtil.matchingFamily(bbCell1, family));
  // Qualifiers differ only between the two buffer-backed cells.
  assertFalse(CellUtil.matchingQualifier(bbCell1, bbCell2));
  assertTrue(CellUtil.matchingQualifier(kv, bbCell2));
  assertTrue(CellUtil.matchingQualifier(bbCell1, qualA));
  assertTrue(CellUtil.matchingQualifier(bbCell2, qualB));
  // Values match everywhere.
  assertTrue(CellUtil.matchingValue(bbCell1, bbCell2));
  assertTrue(CellUtil.matchingValue(kv, bbCell2));
  assertTrue(CellUtil.matchingValue(bbCell1, value));
  // Column = family + qualifier, so it mirrors the qualifier results.
  assertFalse(CellUtil.matchingColumn(bbCell1, bbCell2));
  assertTrue(CellUtil.matchingColumn(kv, bbCell2));
  assertTrue(CellUtil.matchingColumn(bbCell1, family, qualA));
  assertTrue(CellUtil.matchingColumn(bbCell2, family, qualB));
}

InternalCallVerifier EqualityVerifier 
/**
 * Verifies findCommonPrefixInFlatKey over serialized flat keys, walking the
 * key layout: row-length short, row bytes, family-length byte, family bytes,
 * qualifier bytes, 8-byte timestamp, 1-byte type.
 *
 * Fix: String.getBytes() (platform-default charset, inconsistent with the
 * rest of these tests) replaced with Bytes.toBytes(...).
 */
@Test
public void testFindCommonPrefixInFlatKey() {
  KeyValue kv1 = new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), null);
  // A key is a full common prefix of itself, with or without the row part compared.
  Assert.assertEquals(kv1.getKeyLength(),
      CellUtil.findCommonPrefixInFlatKey(kv1, kv1, true, true));
  Assert.assertEquals(kv1.getKeyLength(),
      CellUtil.findCommonPrefixInFlatKey(kv1, kv1, false, true));
  // Excluding ts/type trims TIMESTAMP_TYPE_SIZE bytes from the prefix.
  Assert.assertEquals(kv1.getKeyLength() - KeyValue.TIMESTAMP_TYPE_SIZE,
      CellUtil.findCommonPrefixInFlatKey(kv1, kv1, true, false));
  // Different row lengths: only the first byte of the row-length short matches.
  KeyValue kv2 = new KeyValue(Bytes.toBytes("r12"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), null);
  Assert.assertEquals(1, CellUtil.findCommonPrefixInFlatKey(kv1, kv2, true, true));
  // Same row length, rows share the "r1" prefix.
  KeyValue kv3 = new KeyValue(Bytes.toBytes("r14"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), null);
  Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + Bytes.toBytes("r1").length,
      CellUtil.findCommonPrefixInFlatKey(kv2, kv3, true, true));
  // Same row; families diverge after the shared "f" byte.
  KeyValue kv4 = new KeyValue(Bytes.toBytes("r14"), Bytes.toBytes("f2"), Bytes.toBytes("q1"), null);
  Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + kv3.getRowLength()
      + KeyValue.FAMILY_LENGTH_SIZE + Bytes.toBytes("f").length,
      CellUtil.findCommonPrefixInFlatKey(kv3, kv4, false, true));
  // Same row and family; qualifiers diverge after "q1".
  KeyValue kv5 = new KeyValue(Bytes.toBytes("r14"), Bytes.toBytes("f2"), Bytes.toBytes("q123"), null);
  Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + kv3.getRowLength()
      + KeyValue.FAMILY_LENGTH_SIZE + kv4.getFamilyLength() + kv4.getQualifierLength(),
      CellUtil.findCommonPrefixInFlatKey(kv4, kv5, true, true));
  // Timestamps 1234 vs 1235 share the first 7 of the 8 timestamp bytes.
  KeyValue kv6 = new KeyValue(Bytes.toBytes("rk"), 1234L);
  KeyValue kv7 = new KeyValue(Bytes.toBytes("rk"), 1235L);
  Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + kv6.getRowLength()
      + KeyValue.FAMILY_LENGTH_SIZE + kv6.getFamilyLength() + kv6.getQualifierLength() + 7,
      CellUtil.findCommonPrefixInFlatKey(kv6, kv7, true, true));
  // Same timestamp, different types: prefix extends through the full timestamp.
  KeyValue kv8 = new KeyValue(Bytes.toBytes("rk"), 1234L, Type.Delete);
  Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + kv6.getRowLength()
      + KeyValue.FAMILY_LENGTH_SIZE + kv6.getFamilyLength() + kv6.getQualifierLength()
      + KeyValue.TIMESTAMP_SIZE,
      CellUtil.findCommonPrefixInFlatKey(kv6, kv8, true, true));
  Assert.assertEquals(KeyValue.ROW_LENGTH_SIZE + kv6.getRowLength()
      + KeyValue.FAMILY_LENGTH_SIZE + kv6.getFamilyLength() + kv6.getQualifierLength(),
      CellUtil.findCommonPrefixInFlatKey(kv6, kv8, true, false));
}

Class: org.apache.hadoop.hbase.TestChoreService

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// triggerNow() must return false before the chore is scheduled and true afterwards.
// The expected call counts (1, 2, then 5) depend on the sleep/period interleaving
// below — NOTE(review): timing-sensitive on a loaded machine.
@Test(timeout=20000) public void testTriggerNowFailsWhenNotScheduled() throws InterruptedException { final int period=100; final int sleep=5; ChoreService service=new ChoreService("testTriggerNowFailsWhenNotScheduled"); CountingChore chore=new CountingChore("dn",period); try { assertFalse(chore.triggerNow()); assertTrue(chore.getCountOfChoreCalls() == 0); service.scheduleChore(chore); Thread.sleep(sleep); assertEquals(1,chore.getCountOfChoreCalls()); Thread.sleep(period); assertEquals(2,chore.getCountOfChoreCalls()); assertTrue(chore.triggerNow()); Thread.sleep(sleep); assertTrue(chore.triggerNow()); Thread.sleep(sleep); assertTrue(chore.triggerNow()); Thread.sleep(sleep); assertEquals(5,chore.getCountOfChoreCalls()); } finally { shutdownService(service); } }

InternalCallVerifier BooleanVerifier 
// A chore with period p should run roughly once per p ms: expect 11 calls after
// ~10 periods (one immediate run plus ten periodic runs), then 21 after ten more.
@Test(timeout=20000) public void testFrequencyOfChores() throws InterruptedException { final int period=100; final int delta=5; ChoreService service=ChoreService.getInstance("testFrequencyOfChores"); CountingChore chore=new CountingChore("countingChore",period); try { service.scheduleChore(chore); Thread.sleep(10 * period + delta); assertTrue(chore.getCountOfChoreCalls() == 11); Thread.sleep(10 * period); assertTrue(chore.getCountOfChoreCalls() == 21); } finally { shutdownService(service); } }

InternalCallVerifier BooleanVerifier 
// Chores scheduled before shutdown succeed; after shutdownService the same
// scheduleChore call must return false and leave the chore unscheduled.
@Test(timeout=20000) public void testShutdownRejectsNewSchedules() throws InterruptedException { final int period=100; ChoreService service=new ChoreService("testShutdownRejectsNewSchedules"); ScheduledChore successChore1=new DoNothingChore("sc1",period); ScheduledChore successChore2=new DoNothingChore("sc2",period); ScheduledChore successChore3=new DoNothingChore("sc3",period); ScheduledChore failChore1=new DoNothingChore("fc1",period); ScheduledChore failChore2=new DoNothingChore("fc2",period); ScheduledChore failChore3=new DoNothingChore("fc3",period); try { assertTrue(service.scheduleChore(successChore1)); assertTrue(successChore1.isScheduled()); assertTrue(service.scheduleChore(successChore2)); assertTrue(successChore2.isScheduled()); assertTrue(service.scheduleChore(successChore3)); assertTrue(successChore3.isScheduled()); } finally { shutdownService(service); } assertFalse(service.scheduleChore(failChore1)); assertFalse(failChore1.isScheduled()); assertFalse(service.scheduleChore(failChore2)); assertFalse(failChore2.isScheduled()); assertFalse(service.scheduleChore(failChore3)); assertFalse(failChore3.isScheduled()); }

InternalCallVerifier BooleanVerifier 
/** * ChoreServices should never have a core pool size that exceeds the number of chores that have * been scheduled with the service. For example, if 4 ScheduledChores are scheduled with a * ChoreService, the number of threads in the ChoreService's core pool should never exceed 4 */ @Test(timeout=20000) public void testMaximumChoreServiceThreads() throws InterruptedException { ChoreService service=new ChoreService("testMaximumChoreServiceThreads"); final int period=100; final int sleepTime=5 * period; try { SlowChore sc1=new SlowChore("sc1",period); SlowChore sc2=new SlowChore("sc2",period); SlowChore sc3=new SlowChore("sc3",period); SlowChore sc4=new SlowChore("sc4",period); SlowChore sc5=new SlowChore("sc5",period); service.scheduleChore(sc1); service.scheduleChore(sc2); service.scheduleChore(sc3); service.scheduleChore(sc4); service.scheduleChore(sc5); Thread.sleep(sleepTime); assertTrue(service.getCorePoolSize() <= service.getNumberOfScheduledChores()); SlowChore sc6=new SlowChore("sc6",period); SlowChore sc7=new SlowChore("sc7",period); SlowChore sc8=new SlowChore("sc8",period); SlowChore sc9=new SlowChore("sc9",period); SlowChore sc10=new SlowChore("sc10",period); service.scheduleChore(sc6); service.scheduleChore(sc7); service.scheduleChore(sc8); service.scheduleChore(sc9); service.scheduleChore(sc10); Thread.sleep(sleepTime); assertTrue(service.getCorePoolSize() <= service.getNumberOfScheduledChores()); } finally { shutdownService(service); } }

InternalCallVerifier BooleanVerifier 
// triggerNow() runs the chore immediately without waiting for the period:
// 11 periodic calls, then 5 forced triggers => 16, then more periodic runs > 16.
// NOTE(review): timing-sensitive; relies on delta-sized sleeps between triggers.
@Test(timeout=20000) public void testForceTrigger() throws InterruptedException { final int period=100; final int delta=5; ChoreService service=ChoreService.getInstance("testForceTrigger"); final CountingChore chore=new CountingChore("countingChore",period); try { service.scheduleChore(chore); Thread.sleep(10 * period + delta); assertTrue(chore.getCountOfChoreCalls() == 11); chore.triggerNow(); Thread.sleep(delta); chore.triggerNow(); Thread.sleep(delta); chore.triggerNow(); Thread.sleep(delta); chore.triggerNow(); Thread.sleep(delta); chore.triggerNow(); Thread.sleep(delta); assertTrue("" + chore.getCountOfChoreCalls(),chore.getCountOfChoreCalls() == 16); Thread.sleep(10 * period + delta); assertTrue("" + chore.getCountOfChoreCalls(),chore.getCountOfChoreCalls() > 16); } finally { shutdownService(service); } }

InternalCallVerifier EqualityVerifier 
/**
 * Verifies that ScheduledChore's constructor stores name, period, initial
 * delay and time unit, and that a negative initial delay is coerced to 0.
 *
 * Fix: JUnit's assertEquals(message, expected, actual) had the actual value in
 * the expected slot for the first four assertions, which produces misleading
 * "expected X but was Y" failure messages; the argument order is corrected.
 */
@Test(timeout = 20000)
public void testScheduledChoreConstruction() {
  final String NAME = "chore";
  final int PERIOD = 100;
  final long VALID_DELAY = 0;
  final long INVALID_DELAY = -100;
  final TimeUnit UNIT = TimeUnit.NANOSECONDS;
  ScheduledChore chore1 =
      new ScheduledChore(NAME, new SampleStopper(), PERIOD, VALID_DELAY, UNIT) {
        @Override
        protected void chore() {
        }
      };
  assertEquals("Name construction failed", NAME, chore1.getName());
  assertEquals("Period construction failed", PERIOD, chore1.getPeriod());
  assertEquals("Initial Delay construction failed", VALID_DELAY, chore1.getInitialDelay());
  assertEquals("TimeUnit construction failed", UNIT, chore1.getTimeUnit());
  // A negative initial delay is invalid and must be clamped to zero.
  ScheduledChore invalidDelayChore =
      new ScheduledChore(NAME, new SampleStopper(), PERIOD, INVALID_DELAY, UNIT) {
        @Override
        protected void chore() {
        }
      };
  assertEquals("Initial Delay should be set to 0 when invalid", 0,
      invalidDelayChore.getInitialDelay());
}

InternalCallVerifier EqualityVerifier 
// getNumberOfScheduledChores must track schedule/cancel exactly: 5 scheduled,
// then 4, 1 and 0 as chores are cancelled one-by-one and in a batch.
@Test(timeout=20000) public void testNumberOfRunningChores() throws InterruptedException { ChoreService service=new ChoreService("testNumberOfRunningChores"); final int period=100; final int sleepTime=5; try { DoNothingChore dn1=new DoNothingChore("dn1",period); DoNothingChore dn2=new DoNothingChore("dn2",period); DoNothingChore dn3=new DoNothingChore("dn3",period); DoNothingChore dn4=new DoNothingChore("dn4",period); DoNothingChore dn5=new DoNothingChore("dn5",period); service.scheduleChore(dn1); service.scheduleChore(dn2); service.scheduleChore(dn3); service.scheduleChore(dn4); service.scheduleChore(dn5); Thread.sleep(sleepTime); assertEquals("Scheduled chore mismatch",5,service.getNumberOfScheduledChores()); dn1.cancel(); Thread.sleep(sleepTime); assertEquals("Scheduled chore mismatch",4,service.getNumberOfScheduledChores()); dn2.cancel(); dn3.cancel(); dn4.cancel(); Thread.sleep(sleepTime); assertEquals("Scheduled chore mismatch",1,service.getNumberOfScheduledChores()); dn5.cancel(); Thread.sleep(sleepTime); assertEquals("Scheduled chore mismatch",0,service.getNumberOfScheduledChores()); } finally { shutdownService(service); } }

InternalCallVerifier BooleanVerifier 
// Shutting down mid-execution (each SleepingChore sleeps 5x its period) must
// unschedule all chores, mark the service shut down, and then terminated.
@Test(timeout=20000) public void testShutdownWorksWhileChoresAreExecuting() throws InterruptedException { final int period=100; final int sleep=5 * period; ChoreService service=new ChoreService("testShutdownWorksWhileChoresAreExecuting"); ScheduledChore slowChore1=new SleepingChore("sc1",period,sleep); ScheduledChore slowChore2=new SleepingChore("sc2",period,sleep); ScheduledChore slowChore3=new SleepingChore("sc3",period,sleep); try { assertTrue(service.scheduleChore(slowChore1)); assertTrue(service.scheduleChore(slowChore2)); assertTrue(service.scheduleChore(slowChore3)); Thread.sleep(sleep / 2); shutdownService(service); assertFalse(slowChore1.isScheduled()); assertFalse(slowChore2.isScheduled()); assertFalse(slowChore3.isScheduled()); assertTrue(service.isShutdown()); Thread.sleep(5); assertTrue(service.isTerminated()); } finally { shutdownService(service); } }

InternalCallVerifier BooleanVerifier 
// Re-scheduling a chore onto a second service must transfer ownership: only
// one service reports it scheduled at a time, and cancel() detaches it fully
// (getChoreServicer() back to null).
@Test(timeout=20000) public void testChangingChoreServices() throws InterruptedException { final int period=100; final int sleepTime=10; ChoreService service1=new ChoreService("testChangingChoreServices_1"); ChoreService service2=new ChoreService("testChangingChoreServices_2"); ScheduledChore chore=new DoNothingChore("sample",period); try { assertFalse(chore.isScheduled()); assertFalse(service1.isChoreScheduled(chore)); assertFalse(service2.isChoreScheduled(chore)); assertTrue(chore.getChoreServicer() == null); service1.scheduleChore(chore); Thread.sleep(sleepTime); assertTrue(chore.isScheduled()); assertTrue(service1.isChoreScheduled(chore)); assertFalse(service2.isChoreScheduled(chore)); assertFalse(chore.getChoreServicer() == null); service2.scheduleChore(chore); Thread.sleep(sleepTime); assertTrue(chore.isScheduled()); assertFalse(service1.isChoreScheduled(chore)); assertTrue(service2.isChoreScheduled(chore)); assertFalse(chore.getChoreServicer() == null); chore.cancel(); assertFalse(chore.isScheduled()); assertFalse(service1.isChoreScheduled(chore)); assertFalse(service2.isChoreScheduled(chore)); assertTrue(chore.getChoreServicer() == null); } finally { shutdownService(service1); shutdownService(service2); } }

InternalCallVerifier EqualityVerifier 
@Test(timeout=30000) public void testCorePoolDecrease() throws InterruptedException { final int initialCorePoolSize=3; ChoreService service=new ChoreService("testCorePoolDecrease",initialCorePoolSize,false); final int chorePeriod=100; try { SlowChore slowChore1=new SlowChore("slowChore1",chorePeriod); SlowChore slowChore2=new SlowChore("slowChore2",chorePeriod); SlowChore slowChore3=new SlowChore("slowChore3",chorePeriod); service.scheduleChore(slowChore1); service.scheduleChore(slowChore2); service.scheduleChore(slowChore3); Thread.sleep(chorePeriod * 10); assertEquals("Should not create more pools than scheduled chores",service.getNumberOfScheduledChores(),service.getCorePoolSize()); SlowChore slowChore4=new SlowChore("slowChore4",chorePeriod); service.scheduleChore(slowChore4); Thread.sleep(chorePeriod * 10); assertEquals("Chores are missing their start time. Should expand core pool size",service.getNumberOfScheduledChores(),service.getCorePoolSize()); SlowChore slowChore5=new SlowChore("slowChore5",chorePeriod); service.scheduleChore(slowChore5); Thread.sleep(chorePeriod * 10); assertEquals("Chores are missing their start time. 
Should expand core pool size",service.getNumberOfScheduledChores(),service.getCorePoolSize()); assertEquals(service.getNumberOfChoresMissingStartTime(),5); slowChore5.cancel(); Thread.sleep(chorePeriod * 10); assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE,service.getNumberOfScheduledChores()),service.getCorePoolSize()); assertEquals(service.getNumberOfChoresMissingStartTime(),4); slowChore4.cancel(); Thread.sleep(chorePeriod * 10); assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE,service.getNumberOfScheduledChores()),service.getCorePoolSize()); assertEquals(service.getNumberOfChoresMissingStartTime(),3); slowChore3.cancel(); Thread.sleep(chorePeriod * 10); assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE,service.getNumberOfScheduledChores()),service.getCorePoolSize()); assertEquals(service.getNumberOfChoresMissingStartTime(),2); slowChore2.cancel(); Thread.sleep(chorePeriod * 10); assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE,service.getNumberOfScheduledChores()),service.getCorePoolSize()); assertEquals(service.getNumberOfChoresMissingStartTime(),1); slowChore1.cancel(); Thread.sleep(chorePeriod * 10); assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE,service.getNumberOfScheduledChores()),service.getCorePoolSize()); assertEquals(service.getNumberOfChoresMissingStartTime(),0); } finally { shutdownService(service); } }

InternalCallVerifier BooleanVerifier 
// cancel(true) must unschedule the chore and drop the service's scheduled
// count back to zero.
@Test(timeout=20000) public void testCancelChore() throws InterruptedException { final int period=100; ScheduledChore chore1=new DoNothingChore("chore1",period); ChoreService service=ChoreService.getInstance("testCancelChore"); try { service.scheduleChore(chore1); assertTrue(chore1.isScheduled()); chore1.cancel(true); assertFalse(chore1.isScheduled()); assertTrue(service.getNumberOfScheduledChores() == 0); } finally { shutdownService(service); } }

InternalCallVerifier BooleanVerifier 
// Shutting the service down must cancel every chore it had scheduled: all
// three report isScheduled() == false after shutdownService in finally.
@Test(timeout=20000) public void testShutdownCancelsScheduledChores() throws InterruptedException { final int period=100; ChoreService service=new ChoreService("testShutdownCancelsScheduledChores"); ScheduledChore successChore1=new DoNothingChore("sc1",period); ScheduledChore successChore2=new DoNothingChore("sc2",period); ScheduledChore successChore3=new DoNothingChore("sc3",period); try { assertTrue(service.scheduleChore(successChore1)); assertTrue(successChore1.isScheduled()); assertTrue(service.scheduleChore(successChore2)); assertTrue(successChore2.isScheduled()); assertTrue(service.scheduleChore(successChore3)); assertTrue(successChore3.isScheduled()); } finally { shutdownService(service); } assertFalse(successChore1.isScheduled()); assertFalse(successChore2.isScheduled()); assertFalse(successChore3.isScheduled()); }

InternalCallVerifier EqualityVerifier 
// SlowChores overrun their period; each extra SlowChore beyond the initial
// pool of 3 should force the core pool to grow by exactly one (3 -> 4 -> 5).
@Test(timeout=20000) public void testCorePoolIncrease() throws InterruptedException { final int initialCorePoolSize=3; ChoreService service=new ChoreService("testCorePoolIncrease",initialCorePoolSize,false); try { assertEquals("Should have a core pool of size: " + initialCorePoolSize,initialCorePoolSize,service.getCorePoolSize()); final int slowChorePeriod=100; SlowChore slowChore1=new SlowChore("slowChore1",slowChorePeriod); SlowChore slowChore2=new SlowChore("slowChore2",slowChorePeriod); SlowChore slowChore3=new SlowChore("slowChore3",slowChorePeriod); service.scheduleChore(slowChore1); service.scheduleChore(slowChore2); service.scheduleChore(slowChore3); Thread.sleep(slowChorePeriod * 10); assertEquals("Should not create more pools than scheduled chores",3,service.getCorePoolSize()); SlowChore slowChore4=new SlowChore("slowChore4",slowChorePeriod); service.scheduleChore(slowChore4); Thread.sleep(slowChorePeriod * 10); assertEquals("Chores are missing their start time. Should expand core pool size",4,service.getCorePoolSize()); SlowChore slowChore5=new SlowChore("slowChore5",slowChorePeriod); service.scheduleChore(slowChore5); Thread.sleep(slowChorePeriod * 10); assertEquals("Chores are missing their start time. Should expand core pool size",5,service.getCorePoolSize()); } finally { shutdownService(service); } }

InternalCallVerifier BooleanVerifier 
// Two groups of chores share one Stoppable each; stopping a group's stopper
// must unschedule only that group's chores while the other group keeps running.
@Test(timeout=20000) public void testStopperForScheduledChores() throws InterruptedException { ChoreService service=ChoreService.getInstance("testStopperForScheduledChores"); Stoppable stopperForGroup1=new SampleStopper(); Stoppable stopperForGroup2=new SampleStopper(); final int period=100; final int delta=10; try { ScheduledChore chore1_group1=new DoNothingChore("c1g1",stopperForGroup1,period); ScheduledChore chore2_group1=new DoNothingChore("c2g1",stopperForGroup1,period); ScheduledChore chore3_group1=new DoNothingChore("c3g1",stopperForGroup1,period); ScheduledChore chore1_group2=new DoNothingChore("c1g2",stopperForGroup2,period); ScheduledChore chore2_group2=new DoNothingChore("c2g2",stopperForGroup2,period); ScheduledChore chore3_group2=new DoNothingChore("c3g2",stopperForGroup2,period); service.scheduleChore(chore1_group1); service.scheduleChore(chore2_group1); service.scheduleChore(chore3_group1); service.scheduleChore(chore1_group2); service.scheduleChore(chore2_group2); service.scheduleChore(chore3_group2); Thread.sleep(delta); Thread.sleep(10 * period); assertTrue(chore1_group1.isScheduled()); assertTrue(chore2_group1.isScheduled()); assertTrue(chore3_group1.isScheduled()); assertTrue(chore1_group2.isScheduled()); assertTrue(chore2_group2.isScheduled()); assertTrue(chore3_group2.isScheduled()); stopperForGroup1.stop("test stopping group 1"); Thread.sleep(period); assertFalse(chore1_group1.isScheduled()); assertFalse(chore2_group1.isScheduled()); assertFalse(chore3_group1.isScheduled()); assertTrue(chore1_group2.isScheduled()); assertTrue(chore2_group2.isScheduled()); assertTrue(chore3_group2.isScheduled()); stopperForGroup2.stop("test stopping group 2"); Thread.sleep(period); assertFalse(chore1_group1.isScheduled()); assertFalse(chore2_group1.isScheduled()); assertFalse(chore3_group1.isScheduled()); assertFalse(chore1_group2.isScheduled()); assertFalse(chore2_group2.isScheduled()); assertFalse(chore3_group2.isScheduled()); } finally { 
shutdownService(service); } }

InternalCallVerifier EqualityVerifier 
// Every SlowChore misses its start time (it overruns its period); the missing
// count must track the live chore count exactly as chores are cancelled.
@Test(timeout=20000) public void testNumberOfChoresMissingStartTime() throws InterruptedException { ChoreService service=new ChoreService("testNumberOfChoresMissingStartTime"); final int period=100; final int sleepTime=5 * period; try { SlowChore sc1=new SlowChore("sc1",period); SlowChore sc2=new SlowChore("sc2",period); SlowChore sc3=new SlowChore("sc3",period); SlowChore sc4=new SlowChore("sc4",period); SlowChore sc5=new SlowChore("sc5",period); service.scheduleChore(sc1); service.scheduleChore(sc2); service.scheduleChore(sc3); service.scheduleChore(sc4); service.scheduleChore(sc5); Thread.sleep(sleepTime); assertEquals(5,service.getNumberOfChoresMissingStartTime()); sc1.cancel(); Thread.sleep(sleepTime); assertEquals(4,service.getNumberOfChoresMissingStartTime()); sc2.cancel(); sc3.cancel(); sc4.cancel(); Thread.sleep(sleepTime); assertEquals(1,service.getNumberOfChoresMissingStartTime()); sc5.cancel(); Thread.sleep(sleepTime); assertEquals(0,service.getNumberOfChoresMissingStartTime()); } finally { shutdownService(service); } }

Class: org.apache.hadoop.hbase.TestClassFinder

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Builds a jar of generated test classes and checks that a FileNameFilter
 * excluding PREFIX-named files leaves exactly the one expected class.
 *
 * Fix: generic type arguments were mangled in the original ("Set>" and raw
 * "Class"), which cannot compile; restored to Set&lt;Class&lt;?&gt;&gt; / Class&lt;?&gt;.
 */
@Test
public void testClassFinderFiltersByNameInJar() throws Exception {
  final long counter = testCounter.incrementAndGet();
  final String classNamePrefix = name.getMethodName();
  LOG.info("Created jar " + createAndLoadJar("", classNamePrefix, counter));
  // Accept only files whose name does NOT start with the excluded PREFIX.
  ClassFinder.FileNameFilter notExcNameFilter = new ClassFinder.FileNameFilter() {
    @Override
    public boolean isCandidateFile(String fileName, String absFilePath) {
      return !fileName.startsWith(PREFIX);
    }
  };
  ClassFinder incClassesFinder = new ClassFinder(null, notExcNameFilter, null);
  Set<Class<?>> incClasses = incClassesFinder.findClasses(makePackageName("", counter), false);
  assertEquals(1, incClasses.size());
  Class<?> incClass = makeClass("", classNamePrefix, counter);
  assertTrue(incClasses.contains(incClass));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * ClassFinder should locate classes packaged across multiple jars under the
 * same generated package: c1/c3 in one jar, nested c2 in another (3 total).
 */
@Test
public void testClassFinderCanFindClassesInJars() throws Exception {
  long counter = testCounter.incrementAndGet();
  FileAndPath c1 = compileTestClass(counter, "", "c1");
  FileAndPath c2 = compileTestClass(counter, ".nested", "c2");
  FileAndPath c3 = compileTestClass(counter, "", "c3");
  packageAndLoadJar(c1, c3);
  packageAndLoadJar(c2);
  ClassFinder allClassesFinder = new ClassFinder();
  // NOTE(review): the original read "Set>" — stripped type arguments restored.
  Set<Class<?>> allClasses =
      allClassesFinder.findClasses(makePackageName("", counter), false);
  assertEquals(3, allClasses.size());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A ResourcePathFilter that rejects one specific jar should hide the class
 * packaged in that jar while still finding the class in the other jar.
 */
@Test
public void testClassFinderFiltersByPathInJar() throws Exception {
  final String CLASSNAME = name.getMethodName();
  long counter = testCounter.incrementAndGet();
  FileAndPath c1 = compileTestClass(counter, "", CLASSNAME);
  FileAndPath c2 = compileTestClass(counter, "", "c2");
  packageAndLoadJar(c1);
  final String excludedJar = packageAndLoadJar(c2);
  final String excludedJarResource =
      new File(excludedJar).toURI().getRawSchemeSpecificPart();
  final ClassFinder.ResourcePathFilter notExcJarFilter =
      new ClassFinder.ResourcePathFilter() {
        @Override
        public boolean isCandidatePath(String resourcePath, boolean isJar) {
          return !isJar || !resourcePath.equals(excludedJarResource);
        }
      };
  ClassFinder incClassesFinder = new ClassFinder(notExcJarFilter, null, null);
  // NOTE(review): the original read "Set>"/"Class" — stripped type arguments restored.
  Set<Class<?>> incClasses =
      incClassesFinder.findClasses(makePackageName("", counter), false);
  assertEquals(1, incClasses.size());
  Class<?> incClass = makeClass("", CLASSNAME, counter);
  assertTrue(incClasses.contains(incClass));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A ResourcePathFilter rejecting directory paths that contain this module's
 * subdirectory should make this test class invisible to the finder.
 */
@Test
public void testClassFinderFiltersByPathInDirs() throws Exception {
  final String hardcodedThisSubdir = "hbase-common";
  final ClassFinder.ResourcePathFilter notExcJarFilter =
      new ClassFinder.ResourcePathFilter() {
        @Override
        public boolean isCandidatePath(String resourcePath, boolean isJar) {
          // jars pass through; only directory paths are filtered
          return isJar || !resourcePath.contains(hardcodedThisSubdir);
        }
      };
  String thisPackage = this.getClass().getPackage().getName();
  ClassFinder notThisClassFinder = new ClassFinder(notExcJarFilter, null, null);
  // NOTE(review): the original read "Set>" — stripped type arguments restored.
  Set<Class<?>> notAllClasses = notThisClassFinder.findClasses(thisPackage, false);
  assertFalse(notAllClasses.contains(this.getClass()));
}

InternalCallVerifier EqualityVerifier 
/**
 * findClasses() with no package argument should default to ClassFinder's own
 * package and return the same classes as an explicit lookup of that package.
 */
@Test
public void testClassFinderDefaultsToOwnPackage() throws Exception {
  ClassFinder allClassesFinder = new ClassFinder();
  // NOTE(review): the original read "Set>" — stripped type arguments restored.
  Set<Class<?>> pkgClasses =
      allClassesFinder.findClasses(ClassFinder.class.getPackage().getName(), false);
  Set<Class<?>> defaultClasses = allClassesFinder.findClasses(false);
  assertArrayEquals(pkgClasses.toArray(), defaultClasses.toArray());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A ClassFilter rejecting one specific class should yield exactly one fewer
 * class than an unfiltered scan of the same directory-based package.
 */
@Test
public void testClassFinderFiltersByClassInDirs() throws Exception {
  final long counter = testCounter.incrementAndGet();
  final String classNamePrefix = name.getMethodName();
  String pkgNameSuffix = name.getMethodName();
  LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
  // NOTE(review): the original read raw "Class"/"Set>" — stripped type arguments restored.
  final Class<?> clazz = makeClass(pkgNameSuffix, classNamePrefix, counter);
  final ClassFinder.ClassFilter notThisFilter = new ClassFinder.ClassFilter() {
    @Override
    public boolean isCandidateClass(Class<?> c) {
      return c != clazz;
    }
  };
  String pkgName = makePackageName(pkgNameSuffix, counter);
  ClassFinder allClassesFinder = new ClassFinder();
  Set<Class<?>> allClasses = allClassesFinder.findClasses(pkgName, false);
  assertTrue("Classes in " + pkgName, allClasses.size() > 0);
  ClassFinder notThisClassFinder = new ClassFinder(null, null, notThisFilter);
  Set<Class<?>> notAllClasses = notThisClassFinder.findClasses(pkgName, false);
  assertFalse(contains(notAllClasses, clazz.getSimpleName()));
  assertEquals(allClasses.size() - 1, notAllClasses.size());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Classes in a nested package are found even when split across jars: both
 * CLASSNAME1 and CLASSNAME2 live in ".nested" but ship in different jars.
 */
@Test
public void testClassFinderHandlesNestedPackages() throws Exception {
  final String NESTED = ".nested";
  final String CLASSNAME1 = name.getMethodName() + "1";
  final String CLASSNAME2 = name.getMethodName() + "2";
  long counter = testCounter.incrementAndGet();
  FileAndPath c1 = compileTestClass(counter, "", "c1");
  FileAndPath c2 = compileTestClass(counter, NESTED, CLASSNAME1);
  FileAndPath c3 = compileTestClass(counter, NESTED, CLASSNAME2);
  packageAndLoadJar(c1, c2);
  packageAndLoadJar(c3);
  ClassFinder allClassesFinder = new ClassFinder();
  // NOTE(review): the original read "Set>"/"Class" — stripped type arguments restored.
  Set<Class<?>> nestedClasses =
      allClassesFinder.findClasses(makePackageName(NESTED, counter), false);
  assertEquals(2, nestedClasses.size());
  Class<?> nestedClass1 = makeClass(NESTED, CLASSNAME1, counter);
  assertTrue(nestedClasses.contains(nestedClass1));
  Class<?> nestedClass2 = makeClass(NESTED, CLASSNAME2, counter);
  assertTrue(nestedClasses.contains(nestedClass2));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * When the same class (c1) is packaged into two jars, the finder must
 * de-duplicate: only 2 distinct classes are reported, not 3.
 */
@Test
public void testClassFinderHandlesConflicts() throws Exception {
  long counter = testCounter.incrementAndGet();
  FileAndPath c1 = compileTestClass(counter, "", "c1");
  FileAndPath c2 = compileTestClass(counter, "", "c2");
  packageAndLoadJar(c1, c2);
  packageAndLoadJar(c1);
  ClassFinder allClassesFinder = new ClassFinder();
  // NOTE(review): the original read "Set>" — stripped type arguments restored.
  Set<Class<?>> allClasses =
      allClassesFinder.findClasses(makePackageName("", counter), false);
  assertEquals(2, allClasses.size());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A FileNameFilter matched against the directory-scanned .class file name
 * should remove exactly the filtered class from the results.
 */
@Test
public void testClassFinderFiltersByNameInDirs() throws Exception {
  final long counter = testCounter.incrementAndGet();
  final String classNamePrefix = name.getMethodName();
  String pkgNameSuffix = name.getMethodName();
  LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
  final String classNameToFilterOut = classNamePrefix + counter;
  final ClassFinder.FileNameFilter notThisFilter = new ClassFinder.FileNameFilter() {
    @Override
    public boolean isCandidateFile(String fileName, String absFilePath) {
      return !fileName.equals(classNameToFilterOut + ".class");
    }
  };
  String pkgName = makePackageName(pkgNameSuffix, counter);
  ClassFinder allClassesFinder = new ClassFinder();
  // NOTE(review): the original read "Set>" — stripped type arguments restored.
  Set<Class<?>> allClasses = allClassesFinder.findClasses(pkgName, false);
  assertTrue("Classes in " + pkgName, allClasses.size() > 0);
  ClassFinder notThisClassFinder = new ClassFinder(null, notThisFilter, null);
  Set<Class<?>> notAllClasses = notThisClassFinder.findClasses(pkgName, false);
  assertFalse(contains(notAllClasses, classNameToFilterOut));
  assertEquals(allClasses.size() - 1, notAllClasses.size());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A ClassFilter rejecting excluded-prefix classes should leave only the one
 * included class from the generated jar.
 */
@Test
public void testClassFinderFiltersByClassInJar() throws Exception {
  final long counter = testCounter.incrementAndGet();
  final String classNamePrefix = name.getMethodName();
  LOG.info("Created jar " + createAndLoadJar("", classNamePrefix, counter));
  final ClassFinder.ClassFilter notExcClassFilter = new ClassFinder.ClassFilter() {
    @Override
    public boolean isCandidateClass(Class<?> c) {
      return !c.getSimpleName().startsWith(PREFIX);
    }
  };
  ClassFinder incClassesFinder = new ClassFinder(null, null, notExcClassFilter);
  // NOTE(review): the original read "Set>"/"Class" — stripped type arguments restored.
  Set<Class<?>> incClasses =
      incClassesFinder.findClasses(makePackageName("", counter), false);
  assertEquals(1, incClasses.size());
  Class<?> incClass = makeClass("", classNamePrefix, counter);
  assertTrue(incClasses.contains(incClass));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * An unfiltered directory scan of the generated package should find at least
 * the freshly created class.
 */
@Test
public void testClassFinderCanFindClassesInDirs() throws Exception {
  final long counter = testCounter.incrementAndGet();
  final String classNamePrefix = name.getMethodName();
  String pkgNameSuffix = name.getMethodName();
  LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
  ClassFinder allClassesFinder = new ClassFinder();
  String pkgName = makePackageName(pkgNameSuffix, counter);
  // NOTE(review): the original read "Set>" — stripped type arguments restored.
  Set<Class<?>> allClasses = allClassesFinder.findClasses(pkgName, false);
  assertTrue("Classes in " + pkgName, allClasses.size() > 0);
  String classNameToFind = classNamePrefix + counter;
  assertTrue(contains(allClasses, classNameToFind));
}

Class: org.apache.hadoop.hbase.TestCompoundConfiguration

APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Later-added sources take precedence: map2 values override earlier map1 and
 * baseConf entries for the same keys, while untouched keys survive.
 */
@Test
public void testLaterConfigsOverrideEarlier() {
  // NOTE(review): the original used raw Map/HashMap/Map.Entry — type
  // arguments restored as String/String to match addStringMap.
  Map<String, String> map1 = new HashMap<String, String>();
  map1.put("A", "2");
  map1.put("D", "5");
  Map<String, String> map2 = new HashMap<String, String>();
  String newValueForA = "3", newValueForB = "4";
  map2.put("A", newValueForA);
  map2.put("B", newValueForB);
  CompoundConfiguration compoundConf =
      new CompoundConfiguration().addStringMap(map1).add(baseConf);
  // baseConf was added after map1, so its "A" wins over map1's "2"
  assertEquals("1", compoundConf.get("A"));
  assertEquals("5", compoundConf.get("D"));
  compoundConf.addStringMap(map2);
  assertEquals(newValueForA, compoundConf.get("A"));
  assertEquals(newValueForB, compoundConf.get("B"));
  assertEquals("5", compoundConf.get("D"));
  int cnt = 0;
  for (Map.Entry<String, String> entry : compoundConf) {
    cnt++;
    if (entry.getKey().equals("A")) {
      assertEquals(newValueForA, entry.getValue());
    } else if (entry.getKey().equals("B")) {
      assertEquals(newValueForB, entry.getValue());
    }
  }
  assertEquals(baseConfSize + 1, cnt);
}

BranchVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * String-map source semantics: values (including an explicit null for "G")
 * read through, and addStringMap overrides values previously set() directly.
 */
@Test
public void testWithStringMap() {
  // NOTE(review): the original used raw Map/HashMap/Map.Entry — type
  // arguments restored as String/String to match addStringMap.
  Map<String, String> map = new HashMap<String, String>();
  map.put("B", "2b");
  map.put("C", "33");
  map.put("D", "4");
  map.put("G", null);
  CompoundConfiguration compoundConf = new CompoundConfiguration().addStringMap(map);
  assertEquals("2b", compoundConf.get("B"));
  assertEquals(33, compoundConf.getInt("C", 0));
  assertEquals("4", compoundConf.get("D"));
  assertEquals(4, compoundConf.getInt("D", 0));
  assertNull(compoundConf.get("E"));
  assertEquals(6, compoundConf.getInt("F", 6));
  assertNull(compoundConf.get("G"));
  int cnt = 0;
  for (Map.Entry<String, String> entry : compoundConf) {
    cnt++;
    if (entry.getKey().equals("B")) {
      assertEquals("2b", entry.getValue());
    } else if (entry.getKey().equals("G")) {
      assertEquals(null, entry.getValue());
    }
  }
  assertEquals(4, cnt);
  // a map added later shadows values set directly on the configuration
  CompoundConfiguration conf2 = new CompoundConfiguration();
  conf2.set("X", "modification");
  conf2.set("D", "not4");
  assertEquals("modification", conf2.get("X"));
  assertEquals("not4", conf2.get("D"));
  conf2.addStringMap(map);
  assertEquals("4", conf2.get("D"));
}

APIUtilityVerifier BranchVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Byte-map entries layered over baseConf: map values win for B/C/D, an explicit
// null for "G" reads back as null, and addBytesMap overrides earlier set() calls.
// NOTE(review): raw Map/HashMap/Map.Entry here — type arguments appear stripped
// in extraction; key/value type is whatever strToIb returns (ImmutableBytesWritable
// or Bytes depending on HBase version) — TODO confirm before restoring generics.
@Test public void testWithIbwMap(){ Map map=new HashMap(); map.put(strToIb("B"),strToIb("2b")); map.put(strToIb("C"),strToIb("33")); map.put(strToIb("D"),strToIb("4")); map.put(strToIb("G"),null); CompoundConfiguration compoundConf=new CompoundConfiguration().add(baseConf).addBytesMap(map); assertEquals("1",compoundConf.get("A")); assertEquals("2b",compoundConf.get("B")); assertEquals(33,compoundConf.getInt("C",0)); assertEquals("4",compoundConf.get("D")); assertEquals(4,compoundConf.getInt("D",0)); assertNull(compoundConf.get("E")); assertEquals(6,compoundConf.getInt("F",6)); assertNull(compoundConf.get("G")); int cnt=0; for ( Map.Entry entry : compoundConf) { cnt++; if (entry.getKey().equals("B")) assertEquals("2b",entry.getValue()); else if (entry.getKey().equals("G")) assertEquals(null,entry.getValue()); } assertEquals(baseConfSize + 2,cnt); CompoundConfiguration conf2=new CompoundConfiguration(); conf2.set("X","modification"); conf2.set("D","not4"); assertEquals("modification",conf2.get("X")); assertEquals("not4",conf2.get("D")); conf2.addBytesMap(map); assertEquals("4",conf2.get("D")); }

APIUtilityVerifier BranchVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A plain Hadoop Configuration added after baseConf overrides overlapping
 * keys while baseConf-only keys remain visible through the compound view.
 */
@Test
public void testWithConfig() {
  Configuration conf = new Configuration();
  conf.set("B", "2b");
  conf.set("C", "33");
  conf.set("D", "4");
  CompoundConfiguration compoundConf = new CompoundConfiguration().add(baseConf).add(conf);
  assertEquals("1", compoundConf.get("A"));
  assertEquals("2b", compoundConf.get("B"));
  assertEquals(33, compoundConf.getInt("C", 0));
  assertEquals("4", compoundConf.get("D"));
  assertEquals(4, compoundConf.getInt("D", 0));
  assertNull(compoundConf.get("E"));
  assertEquals(6, compoundConf.getInt("F", 6));
  int cnt = 0;
  // NOTE(review): the original used a raw Map.Entry — type arguments restored
  // to match Configuration's Iterable<Map.Entry<String, String>>.
  for (Map.Entry<String, String> entry : compoundConf) {
    cnt++;
    if (entry.getKey().equals("B")) {
      assertEquals("2b", entry.getValue());
    } else if (entry.getKey().equals("G")) {
      assertEquals(null, entry.getValue());
    }
  }
  assertEquals(baseConfSize + 1, cnt);
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** Sanity-checks reads through a CompoundConfiguration backed by baseConf alone. */
@Test
public void testBasicFunctionality() throws ClassNotFoundException {
  final CompoundConfiguration cc = new CompoundConfiguration().add(baseConf);
  assertEquals("1", cc.get("A"));
  assertEquals(2, cc.getInt("B", 0));
  assertEquals(3, cc.getInt("C", 0));
  assertEquals(0, cc.getInt("D", 0));
  assertEquals(CompoundConfiguration.class,
      cc.getClassByName(CompoundConfiguration.class.getName()));
  try {
    cc.getClassByName("bad_class_name");
    fail("Trying to load bad_class_name should throw an exception");
  } catch (ClassNotFoundException ignored) {
    // expected: the class does not exist
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Values set directly on the compound config shadow baseConf without mutating it. */
@Test
public void testPut() {
  final CompoundConfiguration cc = new CompoundConfiguration().add(baseConf);
  assertEquals("1", cc.get("A"));
  assertEquals(2, cc.getInt("B", 0));
  assertEquals(3, cc.getInt("C", 0));
  assertEquals(0, cc.getInt("D", 0));
  cc.set("A", "1337");
  cc.set("string", "stringvalue");
  assertEquals(1337, cc.getInt("A", 0));
  assertEquals("stringvalue", cc.get("string"));
  // the underlying base configuration must be untouched by the writes above...
  assertEquals("1", baseConf.get("A"));
  assertNull(baseConf.get("string"));
  // ...but later writes to it are still visible through the compound view
  baseConf.set("setInParent", "fromParent");
  assertEquals("fromParent", cc.get("setInParent"));
}

Class: org.apache.hadoop.hbase.TestFSTableDescriptorForceCreation

InternalCallVerifier BooleanVerifier 
/** createTableDescriptor(force=false) must refuse to overwrite an existing descriptor. */
@Test
public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse()
    throws IOException {
  final String name = "testAlreadyExists";
  final FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  final Path rootdir = new Path(UTIL.getDataTestDir(), name);
  final FSTableDescriptors fstd =
      new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  final HTableDescriptor htd = new HTableDescriptor(name);
  fstd.add(htd);
  assertFalse("Should not create new table descriptor",
      fstd.createTableDescriptor(htd, false));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** With force=true an already-existing table descriptor may be overwritten. */
@Test
public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
    throws Exception {
  final String name = "createNewTableNew2";
  final FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  final Path rootdir = new Path(UTIL.getDataTestDir(), name);
  final FSTableDescriptors fstd =
      new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  fstd.createTableDescriptor(htd, false);
  assertTrue("Should create new table descriptor",
      fstd.createTableDescriptor(htd, true));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** Creating a brand-new descriptor succeeds even without forceful creation. */
@Test
public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse()
    throws IOException {
  final String name = "newTable2";
  final FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  final Path rootdir = new Path(UTIL.getDataTestDir(), name);
  final FSTableDescriptors fstd =
      new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  assertTrue("Should create new table descriptor",
      fstd.createTableDescriptor(htd, false));
}

Class: org.apache.hadoop.hbase.TestFullLogReconstruction

InternalCallVerifier EqualityVerifier 
// NOTE(review): integration test — expiring RS 0's ZK session forces region
// reassignment/log replay; the row count must be unchanged afterwards. The
// load/expire/count sequence is order-critical, so code is left verbatim.
/** * Test the whole reconstruction loop. Build a table with regions aaa to zzz * and load every one of them multiple times with the same date and do a flush * at some point. Kill one of the region servers and scan the table. We should * see all the rows. * @throws Exception */ @Test(timeout=300000) public void testReconstruction() throws Exception { Table table=TEST_UTIL.createMultiRegionTable(TABLE_NAME,FAMILY); int initialCount=TEST_UTIL.loadTable(table,FAMILY); int count=TEST_UTIL.countRows(table); assertEquals(initialCount,count); for (int i=0; i < 4; i++) { TEST_UTIL.loadTable(table,FAMILY); } TEST_UTIL.expireRegionServerSession(0); int newCount=TEST_UTIL.countRows(table); assertEquals(count,newCount); table.close(); }

Class: org.apache.hadoop.hbase.TestGlobalMemStoreSize

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): integration test — asserts the server-wide memstore accounting
// equals the sum of per-region memstore sizes, then flushes every region and
// waits (with a meta-region reflush fallback) for the global size to reach 0.
// Flush/wait/reflush ordering is intricate and timing-dependent; code left verbatim.
/** * Test the global mem store size in the region server is equal to sum of each * region's mem store size * @throws Exception */ @Test public void testGlobalMemStore() throws Exception { LOG.info("Starting cluster"); Configuration conf=HBaseConfiguration.create(); TEST_UTIL=new HBaseTestingUtility(conf); TEST_UTIL.startMiniCluster(1,regionServerNum); cluster=TEST_UTIL.getHBaseCluster(); LOG.info("Waiting for active/ready master"); cluster.waitForActiveAndReadyMaster(); TableName table=TableName.valueOf("TestGlobalMemStoreSize"); byte[] family=Bytes.toBytes("family"); LOG.info("Creating table with " + regionNum + " regions"); Table ht=TEST_UTIL.createMultiRegionTable(table,family,regionNum); int numRegions=-1; try (RegionLocator r=TEST_UTIL.getConnection().getRegionLocator(table)){ numRegions=r.getStartKeys().length; } assertEquals(regionNum,numRegions); waitForAllRegionsAssigned(); for ( HRegionServer server : getOnlineRegionServers()) { long globalMemStoreSize=0; for ( HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) { globalMemStoreSize+=server.getFromOnlineRegions(regionInfo.getEncodedName()).getMemstoreSize(); } assertEquals(server.getRegionServerAccounting().getGlobalMemstoreSize(),globalMemStoreSize); } int i=0; for ( HRegionServer server : getOnlineRegionServers()) { LOG.info("Starting flushes on " + server.getServerName() + ", size="+ server.getRegionServerAccounting().getGlobalMemstoreSize()); for ( HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) { Region r=server.getFromOnlineRegions(regionInfo.getEncodedName()); flush(r,server); } LOG.info("Post flush on " + server.getServerName()); long now=System.currentTimeMillis(); long timeout=now + 1000; while (server.getRegionServerAccounting().getGlobalMemstoreSize() != 0 && timeout < System.currentTimeMillis()) { Threads.sleep(10); } long size=server.getRegionServerAccounting().getGlobalMemstoreSize(); if (size > 0) { for ( HRegionInfo 
regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) { Region r=server.getFromOnlineRegions(regionInfo.getEncodedName()); long l=r.getMemstoreSize(); if (l > 0) { assertTrue(regionInfo.isMetaRegion()); LOG.info(r.toString() + " " + l+ ", reflushing"); r.flush(true); } } } size=server.getRegionServerAccounting().getGlobalMemstoreSize(); assertEquals("Server=" + server.getServerName() + ", i="+ i++,0,size); } ht.close(); TEST_UTIL.shutdownMiniCluster(); }

Class: org.apache.hadoop.hbase.TestHBaseConfiguration

APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * getPassword should resolve aliases through the Hadoop credential provider
 * when that API is available; otherwise there is nothing to verify.
 */
@Test
public void testGetPassword() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  conf.set(ReflectiveCredentialProviderClient.CREDENTIAL_PROVIDER_PATH,
      "jceks://file/tmp/foo.jks");
  final ReflectiveCredentialProviderClient client =
      new ReflectiveCredentialProviderClient();
  if (!client.isHadoopCredentialProviderAvailable()) {
    return; // credential provider API not on the classpath
  }
  final char[] keyPass = {'k', 'e', 'y', 'p', 'a', 's', 's'};
  final char[] storePass = {'s', 't', 'o', 'r', 'e', 'p', 'a', 's', 's'};
  client.createEntry(conf, "ssl.keypass.alias", keyPass);
  client.createEntry(conf, "ssl.storepass.alias", storePass);
  final String keypass = HBaseConfiguration.getPassword(conf, "ssl.keypass.alias", null);
  assertEquals(keypass, new String(keyPass));
  final String storepass =
      HBaseConfiguration.getPassword(conf, "ssl.storepass.alias", null);
  assertEquals(storepass, new String(storePass));
}

InternalCallVerifier EqualityVerifier 
/** HBaseConfiguration.getInt reads the new key, falling back to the deprecated one. */
@Test
public void testGetIntDeprecated() {
  int VAL = 1, VAL2 = 2;
  String NAME = "foo";
  String DEPRECATED_NAME = "foo.deprecated";

  // only the new name is set
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(NAME, VAL);
  assertEquals(VAL, HBaseConfiguration.getInt(conf, NAME, DEPRECATED_NAME, 0));

  // only the deprecated name is set
  conf = HBaseConfiguration.create();
  conf.setInt(DEPRECATED_NAME, VAL);
  assertEquals(VAL, HBaseConfiguration.getInt(conf, NAME, DEPRECATED_NAME, 0));

  // both set to the same value
  conf = HBaseConfiguration.create();
  conf.setInt(DEPRECATED_NAME, VAL);
  conf.setInt(NAME, VAL);
  assertEquals(VAL, HBaseConfiguration.getInt(conf, NAME, DEPRECATED_NAME, 0));

  // both set with different values: the deprecated key's value is returned
  conf = HBaseConfiguration.create();
  conf.setInt(DEPRECATED_NAME, VAL);
  conf.setInt(NAME, VAL2);
  assertEquals(VAL, HBaseConfiguration.getInt(conf, NAME, DEPRECATED_NAME, 0));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// subset() strips the prefix from matching keys (dropping non-matching ones and
// the bare-prefix empty key), and merge() folds the subset back over a copy of
// the original config; the bare prefix key survives the merge.
@Test public void testSubset() throws Exception { Configuration conf=HBaseConfiguration.create(); String prefix="hbase.mapred.output."; conf.set("hbase.security.authentication","kerberos"); conf.set("hbase.regionserver.kerberos.principal","hbasesource"); HBaseConfiguration.setWithPrefix(conf,prefix,ImmutableMap.of("hbase.regionserver.kerberos.principal","hbasedest","","shouldbemissing").entrySet()); Configuration subsetConf=HBaseConfiguration.subset(conf,prefix); assertNull(subsetConf.get(prefix + "hbase.regionserver.kerberos.principal")); assertEquals("hbasedest",subsetConf.get("hbase.regionserver.kerberos.principal")); assertNull(subsetConf.get("hbase.security.authentication")); assertNull(subsetConf.get("")); Configuration mergedConf=HBaseConfiguration.create(conf); HBaseConfiguration.merge(mergedConf,subsetConf); assertEquals("hbasedest",mergedConf.get("hbase.regionserver.kerberos.principal")); assertEquals("kerberos",mergedConf.get("hbase.security.authentication")); assertEquals("shouldbemissing",mergedConf.get(prefix)); }

Class: org.apache.hadoop.hbase.TestHBaseOnOtherDfsCluster

InternalCallVerifier BooleanVerifier 
// NOTE(review): runs an HBase mini cluster (util2) on top of a DFS owned by a
// different testing utility (util1); both must observe the same filesystem URI
// and the same files. Cluster lifecycle/shutdown ordering matters; code left verbatim.
@Test public void testOveralyOnOtherCluster() throws Exception { HBaseTestingUtility util1=new HBaseTestingUtility(); MiniDFSCluster dfs=util1.startMiniDFSCluster(1); HBaseTestingUtility util2=new HBaseTestingUtility(); util2.setDFSCluster(dfs,false); util2.startMiniCluster(); FileSystem fs=dfs.getFileSystem(); FileSystem targetFs=util2.getDFSCluster().getFileSystem(); assertFsSameUri(fs,targetFs); fs=FileSystem.get(util1.getConfiguration()); targetFs=FileSystem.get(util2.getConfiguration()); assertFsSameUri(fs,targetFs); Path randomFile=new Path("/" + UUID.randomUUID()); assertTrue(targetFs.createNewFile(randomFile)); assertTrue(fs.exists(randomFile)); byte[] family=Bytes.toBytes("testfamily"); TableName tablename=TableName.valueOf("testtable"); Table table=util2.createTable(tablename,family); Put p=new Put(new byte[]{1,2,3}); p.addColumn(family,null,new byte[]{1}); table.put(p); util2.shutdownMiniCluster(); util1.shutdownMiniDFSCluster(); }

Class: org.apache.hadoop.hbase.TestHBaseTestingUtility

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A single-node mini ZK cluster has no backups, so the active server cannot fail over. */
@Test
public void testMiniZooKeeperWithOneServer() throws Exception {
  final HBaseTestingUtility hbt = new HBaseTestingUtility();
  final MiniZooKeeperCluster zkCluster = hbt.startMiniZKCluster();
  try {
    assertEquals(0, zkCluster.getBackupZooKeeperServerNum());
    // with no backup to promote, killing the active server reports -1
    assertTrue(zkCluster.killCurrentActiveZooKeeperServer() == -1);
  } finally {
    hbt.shutdownMiniZKCluster();
  }
}

InternalCallVerifier BooleanVerifier 
// The cluster test dir must not exist before startup, be created by
// startMiniDFSCluster, and be removed again by shutdownMiniCluster.
// NOTE(review): tied to mini-cluster lifecycle; code left verbatim.
@Test public void testSetupClusterTestBuildDir() throws Exception { HBaseTestingUtility hbt=new HBaseTestingUtility(); Path testdir=hbt.getClusterTestDir(); LOG.info("uuid-subdir=" + testdir); FileSystem fs=hbt.getTestFileSystem(); assertFalse(fs.exists(testdir)); hbt.startMiniDFSCluster(null); assertTrue(fs.exists(testdir)); hbt.shutdownMiniCluster(); assertFalse(fs.exists(testdir)); }

InternalCallVerifier EqualityVerifier 
/**
 * PortAllocator must skip a port it already handed out: the mocked Random
 * yields 1, 1, 2, so the second randomFreePort() call retries past the
 * duplicate (three nextInt calls total) and returns a different port.
 */
@Test
public void testResolvePortConflict() throws Exception {
  Random random = Mockito.mock(Random.class);
  // NOTE(review): the original used a raw Answer — type argument restored.
  Mockito.when(random.nextInt(Mockito.any(Integer.class))).thenAnswer(
      new Answer<Integer>() {
        int[] numbers = {1, 1, 2};
        int count = 0;

        @Override
        public Integer answer(InvocationOnMock invocation) {
          int ret = numbers[count];
          count++;
          return ret;
        }
      });
  HBaseTestingUtility.PortAllocator.AvailablePortChecker portChecker =
      Mockito.mock(HBaseTestingUtility.PortAllocator.AvailablePortChecker.class);
  Mockito.when(portChecker.available(Mockito.any(Integer.class))).thenReturn(true);
  HBaseTestingUtility.PortAllocator portAllocator =
      new HBaseTestingUtility.PortAllocator(random, portChecker);
  int port1 = portAllocator.randomFreePort();
  int port2 = portAllocator.randomFreePort();
  assertNotEquals(port1, port2);
  Mockito.verify(random, Mockito.times(3)).nextInt(Mockito.any(Integer.class));
}

InternalCallVerifier EqualityVerifier 
// NOTE(review): three mini clusters share one ZK ensemble via distinct znode
// parents (/1, /2, /3); a row written to cluster 1 must not appear in cluster 2.
// Startup/shutdown ordering is deliberate; code left verbatim.
/** * Basic sanity test that spins up multiple HDFS and HBase clusters that share * the same ZK ensemble. We then create the same table in both and make sure * that what we insert in one place doesn't end up in the other. * @throws Exception */ @Test(timeout=180000) public void testMultiClusters() throws Exception { HBaseTestingUtility htu1=new HBaseTestingUtility(); htu1.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT,"/1"); htu1.startMiniZKCluster(); HBaseTestingUtility htu2=new HBaseTestingUtility(); htu2.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT,"/2"); htu2.getConfiguration().set(HConstants.ZOOKEEPER_CLIENT_PORT,htu1.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT,"-1")); htu2.setZkCluster(htu1.getZkCluster()); HBaseTestingUtility htu3=new HBaseTestingUtility(); htu3.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT,"/3"); htu3.getConfiguration().set(HConstants.ZOOKEEPER_CLIENT_PORT,htu1.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT,"-1")); htu3.setZkCluster(htu1.getZkCluster()); try { htu1.startMiniCluster(); htu2.startMiniCluster(); htu3.startMiniCluster(); final TableName TABLE_NAME=TableName.valueOf("test"); final byte[] FAM_NAME=Bytes.toBytes("fam"); final byte[] ROW=Bytes.toBytes("row"); final byte[] QUAL_NAME=Bytes.toBytes("qual"); final byte[] VALUE=Bytes.toBytes("value"); Table table1=htu1.createTable(TABLE_NAME,FAM_NAME); Table table2=htu2.createTable(TABLE_NAME,FAM_NAME); Put put=new Put(ROW); put.addColumn(FAM_NAME,QUAL_NAME,VALUE); table1.put(put); Get get=new Get(ROW); get.addColumn(FAM_NAME,QUAL_NAME); Result res=table1.get(get); assertEquals(1,res.size()); res=table2.get(get); assertEquals(0,res.size()); table1.close(); table2.close(); } finally { htu3.shutdownMiniCluster(); htu2.shutdownMiniCluster(); htu1.shutdownMiniCluster(); } }

InternalCallVerifier EqualityVerifier 
// Boots a mini cluster with hbase.ssl.enabled=true using freshly generated
// keystores and ssl-server/ssl-client resources, then checks one live RS.
// NOTE(review): keystore setup must precede cluster start; code left verbatim.
@Test public void testMiniClusterWithSSLOn() throws Exception { final String BASEDIR=System.getProperty("test.build.dir","target/test-dir") + "/" + TestHBaseTestingUtility.class.getSimpleName(); String sslConfDir=KeyStoreTestUtil.getClasspathDir(TestHBaseTestingUtility.class); String keystoresDir=new File(BASEDIR).getAbsolutePath(); HBaseTestingUtility hbt=new HBaseTestingUtility(); File base=new File(BASEDIR); FileUtil.fullyDelete(base); base.mkdirs(); KeyStoreTestUtil.setupSSLConfig(keystoresDir,sslConfDir,hbt.getConfiguration(),false); hbt.getConfiguration().set("hbase.ssl.enabled","true"); hbt.getConfiguration().addResource("ssl-server.xml"); hbt.getConfiguration().addResource("ssl-client.xml"); MiniHBaseCluster cluster=hbt.startMiniCluster(); try { assertEquals(1,cluster.getLiveRegionServerThreads().size()); } finally { hbt.shutdownMiniCluster(); } }

InternalCallVerifier BooleanVerifier 
/** The same HBaseTestingUtility can start and stop a mini cluster repeatedly. */
@Test
public void testMultipleStartStop() throws Exception {
  final HBaseTestingUtility util = new HBaseTestingUtility();
  final Path foo = new Path("foo");

  util.startMiniCluster();
  util.getDFSCluster().getFileSystem().create(foo);
  assertTrue(util.getDFSCluster().getFileSystem().exists(foo));
  util.shutdownMiniCluster();

  // a restart gets a fresh DFS: the old file is gone, and the path is writable again
  util.startMiniCluster();
  assertFalse(util.getDFSCluster().getFileSystem().exists(foo));
  util.getDFSCluster().getFileSystem().create(foo);
  assertTrue(util.getDFSCluster().getFileSystem().exists(foo));
  util.shutdownMiniCluster();
}

InternalCallVerifier BooleanVerifier 
/** DFS smoke test: mkdirs and delete against a freshly started mini DFS cluster. */
@Test
public void testMiniDFSCluster() throws Exception {
  final HBaseTestingUtility hbt = new HBaseTestingUtility();
  final MiniDFSCluster cluster = hbt.startMiniDFSCluster(null);
  final FileSystem dfs = cluster.getFileSystem();
  final Path dir = new Path("dir");
  final Path qualifiedDir = dfs.makeQualified(dir);
  LOG.info("dir=" + dir + ", qualifiedDir=" + qualifiedDir);
  assertFalse(dfs.exists(qualifiedDir));
  assertTrue(dfs.mkdirs(qualifiedDir));
  assertTrue(dfs.delete(qualifiedDir, true));
  hbt.shutdownMiniCluster();
}

InternalCallVerifier BooleanVerifier 
/** The data test dir starts absent, can be created, and cleans up afterwards. */
@Test
public void testTestDir() throws Exception {
  final HBaseTestingUtility hbt = new HBaseTestingUtility();
  final Path testdir = hbt.getDataTestDir();
  LOG.info("testdir=" + testdir);
  final FileSystem fs = hbt.getTestFileSystem();
  assertFalse(fs.exists(testdir));
  assertTrue(fs.mkdirs(testdir));
  assertTrue(hbt.cleanupTestDir());
}

InternalCallVerifier EqualityVerifier 
/** A default mini cluster comes up with exactly one live region server. */
@Test
public void testMiniCluster() throws Exception {
  final HBaseTestingUtility hbt = new HBaseTestingUtility();
  final MiniHBaseCluster cluster = hbt.startMiniCluster();
  try {
    assertEquals(1, cluster.getLiveRegionServerThreads().size());
  } finally {
    hbt.shutdownMiniCluster();
  }
}

IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Exercises startMiniZKCluster port assignment: explicit port lists, padding
// extra servers with the configured default port, skipping negative/zero
// entries, and rejecting duplicate ports (expected to fail in the last case).
// NOTE(review): raw "List" here — the element type argument (presumably
// List<Integer>, per getClientPortList usage) appears stripped in extraction.
// The default-port offset bookkeeping (j, j+=2) is intricate; code left verbatim.
@Test public void testMiniZooKeeperWithMultipleClientPorts() throws Exception { int defaultClientPort=8888; int i, j; HBaseTestingUtility hbt=new HBaseTestingUtility(); int[] clientPortList1={1111,1112,1113}; MiniZooKeeperCluster cluster1=hbt.startMiniZKCluster(clientPortList1.length,clientPortList1); try { List clientPortListInCluster=cluster1.getClientPortList(); for (i=0; i < clientPortListInCluster.size(); i++) { assertEquals(clientPortListInCluster.get(i).intValue(),clientPortList1[i]); } } finally { hbt.shutdownMiniZKCluster(); } hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort",defaultClientPort); int[] clientPortList2={2222,2223}; MiniZooKeeperCluster cluster2=hbt.startMiniZKCluster(clientPortList2.length + 2,clientPortList2); try { List clientPortListInCluster=cluster2.getClientPortList(); for (i=0, j=0; i < clientPortListInCluster.size(); i++) { if (i < clientPortList2.length) { assertEquals(clientPortListInCluster.get(i).intValue(),clientPortList2[i]); } else { assertEquals(clientPortListInCluster.get(i).intValue(),defaultClientPort + j); j++; } } } finally { hbt.shutdownMiniZKCluster(); } hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort",defaultClientPort); int[] clientPortList3={3333,-3334,3335,0}; MiniZooKeeperCluster cluster3=hbt.startMiniZKCluster(clientPortList3.length + 1,clientPortList3); try { List clientPortListInCluster=cluster3.getClientPortList(); for (i=0, j=0; i < clientPortListInCluster.size(); i++) { if (i < clientPortList3.length && clientPortList3[i] > 0) { assertEquals(clientPortListInCluster.get(i).intValue(),clientPortList3[i]); } else { assertEquals(clientPortListInCluster.get(i).intValue(),defaultClientPort + j); j++; } } } finally { hbt.shutdownMiniZKCluster(); } hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort",defaultClientPort); int[] clientPortList4={-4444,defaultClientPort + 2,4446,defaultClientPort}; MiniZooKeeperCluster 
cluster4=hbt.startMiniZKCluster(clientPortList4.length + 1,clientPortList4); try { List clientPortListInCluster=cluster4.getClientPortList(); for (i=0, j=1; i < clientPortListInCluster.size(); i++) { if (i < clientPortList4.length && clientPortList4[i] > 0) { assertEquals(clientPortListInCluster.get(i).intValue(),clientPortList4[i]); } else { assertEquals(clientPortListInCluster.get(i).intValue(),defaultClientPort + j); j+=2; } } } finally { hbt.shutdownMiniZKCluster(); } int[] clientPortList5={5555,5556,5556}; try { MiniZooKeeperCluster cluster5=hbt.startMiniZKCluster(clientPortList5.length,clientPortList5); assertTrue(cluster5.getClientPort() == -1); } catch ( Exception e) { } finally { hbt.shutdownMiniZKCluster(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises a five-node mini ZK ensemble: repeatedly kills the active server and
 * the backups, checking the reported active client port and the backup/server
 * counts after each kill, all the way down to an empty ensemble.
 */
@Test
public void testMiniZooKeeperWithMultipleServers() throws Exception {
  HBaseTestingUtility hbt = new HBaseTestingUtility();
  MiniZooKeeperCluster zkCluster = hbt.startMiniZKCluster(5);
  int defaultClientPort = 21818;
  zkCluster.setDefaultClientPort(defaultClientPort);
  try {
    // Five servers: one active, four backups.
    assertEquals(4, zkCluster.getBackupZooKeeperServerNum());

    // Kill the active server twice; a backup must take over each time and the
    // cluster must report the new active port.
    int activePort = zkCluster.killCurrentActiveZooKeeperServer();
    assertTrue(activePort >= defaultClientPort);
    assertTrue(zkCluster.getClientPort() == activePort);
    activePort = zkCluster.killCurrentActiveZooKeeperServer();
    assertTrue(activePort >= defaultClientPort);
    assertTrue(zkCluster.getClientPort() == activePort);
    assertEquals(2, zkCluster.getBackupZooKeeperServerNum());
    assertEquals(3, zkCluster.getZooKeeperServerNum());

    // Remove both remaining backups, leaving only the active server.
    zkCluster.killOneBackupZooKeeperServer();
    zkCluster.killOneBackupZooKeeperServer();
    assertEquals(0, zkCluster.getBackupZooKeeperServerNum());
    assertEquals(1, zkCluster.getZooKeeperServerNum());

    // Killing the last active server empties the ensemble; port reported as -1.
    activePort = zkCluster.killCurrentActiveZooKeeperServer();
    assertTrue(activePort == -1);
    assertTrue(zkCluster.getClientPort() == activePort);

    // Killing a backup when none remain drives the counts to -1 / 0.
    zkCluster.killOneBackupZooKeeperServer();
    assertEquals(-1, zkCluster.getBackupZooKeeperServerNum());
    assertEquals(0, zkCluster.getZooKeeperServerNum());
  } finally {
    hbt.shutdownMiniZKCluster();
  }
}

InternalCallVerifier EqualityVerifier 
/** Verifies the mini cluster starts when the region server RPC address is 0.0.0.0. */
@Test
public void testMiniClusterBindToWildcard() throws Exception {
  HBaseTestingUtility util = new HBaseTestingUtility();
  util.getConfiguration().set("hbase.regionserver.ipc.address", "0.0.0.0");
  MiniHBaseCluster miniCluster = util.startMiniCluster();
  try {
    // Exactly one region server should have come up.
    assertEquals(1, miniCluster.getLiveRegionServerThreads().size());
  } finally {
    util.shutdownMiniCluster();
  }
}

Class: org.apache.hadoop.hbase.TestHColumnDescriptor

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips an HColumnDescriptor through its protobuf byte[] form and verifies
 * that every configured attribute survives serialization and deserialization.
 */
@Test
public void testPb() throws DeserializationException {
  // Copy-construct from a fully configured catalog-family descriptor.
  HColumnDescriptor descriptor = new HColumnDescriptor(
      new HColumnDescriptor(HConstants.CATALOG_FAMILY)
          .setInMemory(true)
          .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
          .setBloomFilterType(BloomType.NONE)
          .setCacheDataInL1(true));
  final int v = 123;
  descriptor.setBlocksize(v);
  descriptor.setTimeToLive(v);
  descriptor.setBlockCacheEnabled(!HColumnDescriptor.DEFAULT_BLOCKCACHE);
  descriptor.setValue("a", "b");
  descriptor.setMaxVersions(v);
  assertEquals(v, descriptor.getMaxVersions());
  descriptor.setMinVersions(v);
  assertEquals(v, descriptor.getMinVersions());
  descriptor.setKeepDeletedCells(KeepDeletedCells.TRUE);
  descriptor.setInMemory(!HColumnDescriptor.DEFAULT_IN_MEMORY);
  boolean inmemory = descriptor.isInMemory();
  descriptor.setScope(v);
  descriptor.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
  descriptor.setBloomFilterType(BloomType.ROW);
  descriptor.setCompressionType(Algorithm.SNAPPY);
  descriptor.setMobEnabled(true);
  descriptor.setMobThreshold(1000L);
  descriptor.setDFSReplication((short) v);

  // Serialize to protobuf bytes and parse back.
  byte[] bytes = descriptor.toByteArray();
  HColumnDescriptor roundTripped = HColumnDescriptor.parseFrom(bytes);

  // The parsed copy must match the original attribute-for-attribute.
  assertTrue(descriptor.equals(roundTripped));
  assertEquals(v, descriptor.getBlocksize());
  assertEquals(v, descriptor.getTimeToLive());
  assertEquals(descriptor.getValue("a"), roundTripped.getValue("a"));
  assertEquals(descriptor.getMaxVersions(), roundTripped.getMaxVersions());
  assertEquals(descriptor.getMinVersions(), roundTripped.getMinVersions());
  assertEquals(descriptor.getKeepDeletedCells(), roundTripped.getKeepDeletedCells());
  assertEquals(inmemory, roundTripped.isInMemory());
  assertEquals(descriptor.getScope(), roundTripped.getScope());
  assertTrue(roundTripped.getCompressionType().equals(Compression.Algorithm.SNAPPY));
  assertTrue(roundTripped.getDataBlockEncoding().equals(DataBlockEncoding.FAST_DIFF));
  assertTrue(roundTripped.getBloomFilterType().equals(BloomType.ROW));
  assertEquals(descriptor.isMobEnabled(), roundTripped.isMobEnabled());
  assertEquals(descriptor.getMobThreshold(), roundTripped.getMobThreshold());
  assertEquals(v, roundTripped.getDFSReplication());
}

InternalCallVerifier EqualityVerifier 
/** Verifies a configuration key can be set, read back, and removed. */
@Test
public void testAddGetRemoveConfiguration() throws Exception {
  HColumnDescriptor descriptor = new HColumnDescriptor("foo");
  final String key = "Some";
  final String value = "value";
  descriptor.setConfiguration(key, value);
  assertEquals(value, descriptor.getConfigurationValue(key));
  // Removing the key must make the lookup return null again.
  descriptor.removeConfiguration(key);
  assertEquals(null, descriptor.getConfigurationValue(key));
}

InternalCallVerifier EqualityVerifier 
/** Checks parsing of the human-readable TTL strings accepted by setTimeToLive. */
@Test
public void testSetTimeToLive() throws HBaseException {
  HColumnDescriptor descriptor = new HColumnDescriptor("foo");

  // Plain number of seconds.
  descriptor.setTimeToLive("50000");
  Assert.assertEquals(50000, descriptor.getTimeToLive());

  // Number with an explicit unit.
  descriptor.setTimeToLive("50000 seconds");
  Assert.assertEquals(50000, descriptor.getTimeToLive());

  // Empty string yields a TTL of zero.
  descriptor.setTimeToLive("");
  Assert.assertEquals(0, descriptor.getTimeToLive());

  // The FOREVER keyword maps to HConstants.FOREVER.
  descriptor.setTimeToLive("FOREVER");
  Assert.assertEquals(HConstants.FOREVER, descriptor.getTimeToLive());

  // Mixed units with mixed case.
  descriptor.setTimeToLive("1 HOUR 10 minutes 1 second");
  Assert.assertEquals(4201, descriptor.getTimeToLive());

  descriptor.setTimeToLive("500 Days 23 HOURS");
  Assert.assertEquals(43282800, descriptor.getTimeToLive());

  // A parenthesized human-readable suffix is tolerated.
  descriptor.setTimeToLive("43282800 SECONDS (500 Days 23 hours)");
  Assert.assertEquals(43282800, descriptor.getTimeToLive());
}

Class: org.apache.hadoop.hbase.TestHColumnDescriptorDefaultVersions

InternalCallVerifier EqualityVerifier 
/**
 * Writing the raw VERSIONS key via setValue must be reflected by getMaxVersions,
 * i.e. the cached value set earlier via setMaxVersions is superseded.
 */
@Test
public void testHColumnDescriptorCachedMaxVersions() throws Exception {
  HColumnDescriptor descriptor = new HColumnDescriptor(FAMILY);
  descriptor.setMaxVersions(5);
  assertEquals(5, descriptor.getMaxVersions());
  // Bypass the typed setter and write the underlying key directly.
  descriptor.setValue(Bytes.toBytes(HConstants.VERSIONS), Bytes.toBytes("8"));
  assertEquals(8, descriptor.getMaxVersions());
}

Class: org.apache.hadoop.hbase.TestHDFSBlocksDistribution

InternalCallVerifier EqualityVerifier 
/** Exercises addHostsAndBlockWeight with null, empty, repeated, and new hosts. */
@Test
public void testAddHostsAndBlockWeight() throws Exception {
  HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();

  // Null and empty host arrays are no-ops.
  blocksDistribution.addHostsAndBlockWeight(null, 100);
  assertEquals("Expecting no hosts weights", 0, blocksDistribution.getHostAndWeights().size());
  blocksDistribution.addHostsAndBlockWeight(new String[0], 100);
  assertEquals("Expecting no hosts weights", 0, blocksDistribution.getHostAndWeights().size());

  // Adding the same host twice accumulates its weight instead of duplicating it.
  blocksDistribution.addHostsAndBlockWeight(new String[]{"test"}, 101);
  assertEquals("Should be one host", 1, blocksDistribution.getHostAndWeights().size());
  blocksDistribution.addHostsAndBlockWeight(new String[]{"test"}, 202);
  assertEquals("Should be one host", 1, blocksDistribution.getHostAndWeights().size());
  assertEquals("test host should have weight 303", 303,
      blocksDistribution.getHostAndWeights().get("test").getWeight());

  // A distinct host adds a new entry and contributes to the total weight.
  blocksDistribution.addHostsAndBlockWeight(new String[]{"testTwo"}, 222);
  assertEquals("Should be two hosts", 2, blocksDistribution.getHostAndWeights().size());
  assertEquals("Total weight should be 525", 525,
      blocksDistribution.getUniqueBlocksTotalWeight());
}

InternalCallVerifier EqualityVerifier 
/** Merging an empty distribution must not change hosts or total weight. */
@Test
public void testAdd() throws Exception {
  HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
  // Adding an empty mock into an empty distribution stays empty.
  blocksDistribution.add(new MockHDFSBlocksDistribution());
  assertEquals("Expecting no hosts weights", 0, blocksDistribution.getHostAndWeights().size());
  blocksDistribution.addHostsAndBlockWeight(new String[]{"test"}, 10);
  assertEquals("Should be one host", 1, blocksDistribution.getHostAndWeights().size());
  // Merging an empty mock leaves the existing entry and weight untouched.
  blocksDistribution.add(new MockHDFSBlocksDistribution());
  assertEquals("Should be one host", 1, blocksDistribution.getHostAndWeights().size());
  assertEquals("Total weight should be 10", 10,
      blocksDistribution.getUniqueBlocksTotalWeight());
}

Class: org.apache.hadoop.hbase.TestHRegionLocation

InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * HRegionLocations are equal if they have the same 'location' -- i.e. host and
 * port -- even if they are carrying different regions or sequence numbers.
 * Verify that is indeed the case.
 */
@Test
public void testHashAndEqualsCode() {
  ServerName serverA = ServerName.valueOf("localhost", 1234, -1L);
  HRegionLocation locationOne =
      new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, serverA);
  HRegionLocation locationTwo =
      new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, serverA);
  // Same server name: equal objects with equal hashes.
  assertEquals(locationOne.hashCode(), locationTwo.hashCode());
  assertTrue(locationOne.equals(locationTwo));
  HRegionLocation locationThree =
      new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, serverA);
  // Distinct instances, still equal by value.
  assertNotSame(locationOne, locationThree);
  assertTrue(locationOne.equals(locationThree));
  // A different port makes the locations unequal.
  ServerName serverB = ServerName.valueOf("localhost", 12345, -1L);
  HRegionLocation locationFour =
      new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, serverB);
  assertFalse(locationThree.equals(locationFour));
  // The sequence number is not part of the equality contract.
  HRegionLocation locationFive = new HRegionLocation(locationFour.getRegionInfo(),
      locationFour.getServerName(), locationFour.getSeqNum() + 1);
  assertTrue(locationFour.equals(locationFive));
}

InternalCallVerifier BooleanVerifier 
/** compareTo must be zero against self and antisymmetric across different ports. */
@Test
public void testCompareTo() {
  ServerName serverA = ServerName.valueOf("localhost", 1234, -1L);
  HRegionLocation locationA =
      new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, serverA);
  ServerName serverB = ServerName.valueOf("localhost", 1235, -1L);
  HRegionLocation locationB =
      new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, serverB);
  // Comparing a location with itself yields zero.
  assertTrue(locationA.compareTo(locationA) == 0);
  assertTrue(locationB.compareTo(locationB) == 0);
  // The two cross comparisons must have opposite signs.
  int forward = locationA.compareTo(locationB);
  int backward = locationB.compareTo(locationA);
  assertTrue((forward > 0) ? backward < 0 : backward > 0);
}

Class: org.apache.hadoop.hbase.TestHTableDescriptor

InternalCallVerifier EqualityVerifier 
/** Verifies a configuration key can be set, read back, and removed on a table. */
@Test
public void testAddGetRemoveConfiguration() throws Exception {
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf("table"));
  final String key = "Some";
  final String value = "value";
  descriptor.setConfiguration(key, value);
  assertEquals(value, descriptor.getConfigurationValue(key));
  // After removal the lookup must return null.
  descriptor.removeConfiguration(key);
  assertEquals(null, descriptor.getConfigurationValue(key));
}

InternalCallVerifier BooleanVerifier 
/** Coprocessors can be added, listed, and removed on a table descriptor. */
@Test
public void testSetListRemoveCP() throws Exception {
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf("testGetSetRemoveCP"));
  String className1 = "org.apache.hadoop.hbase.coprocessor.BaseRegionObserver";
  String className2 = "org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver";
  // Starts out with no coprocessors.
  assertTrue(descriptor.getCoprocessors().size() == 0);
  // Add the first one.
  descriptor.addCoprocessor(className1);
  assertTrue(descriptor.getCoprocessors().size() == 1);
  assertTrue(descriptor.getCoprocessors().contains(className1));
  // Add the second one.
  descriptor.addCoprocessor(className2);
  assertTrue(descriptor.getCoprocessors().size() == 2);
  assertTrue(descriptor.getCoprocessors().contains(className2));
  // Remove the first; only the second remains.
  descriptor.removeCoprocessor(className1);
  assertTrue(descriptor.getCoprocessors().size() == 1);
  assertFalse(descriptor.getCoprocessors().contains(className1));
  assertTrue(descriptor.getCoprocessors().contains(className2));
  // Remove the second; the list is empty again.
  descriptor.removeCoprocessor(className2);
  assertTrue(descriptor.getCoprocessors().size() == 0);
  assertFalse(descriptor.getCoprocessors().contains(className1));
  assertFalse(descriptor.getCoprocessors().contains(className2));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips an HTableDescriptor through its protobuf byte[] form and verifies
 * the configured attributes survive serialization and deserialization.
 */
@Test
public void testPb() throws DeserializationException, IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
  final int v = 123;
  htd.setMaxFileSize(v);
  htd.setDurability(Durability.ASYNC_WAL);
  htd.setReadOnly(true);
  htd.setRegionReplication(2);
  byte[] bytes = htd.toByteArray();
  HTableDescriptor deserializedHtd = HTableDescriptor.parseFrom(bytes);
  assertEquals(htd, deserializedHtd);
  assertEquals(v, deserializedHtd.getMaxFileSize());
  assertTrue(deserializedHtd.isReadOnly());
  assertEquals(Durability.ASYNC_WAL, deserializedHtd.getDurability());
  // Fixed argument order: expected value first, actual second (was swapped).
  assertEquals(2, deserializedHtd.getRegionReplication());
}

InternalCallVerifier EqualityVerifier 
/** Verifies plain string values can be set, read back, and removed. */
@Test
public void testRemoveString() throws Exception {
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf("table"));
  final String key = "Some";
  final String value = "value";
  descriptor.setValue(key, value);
  assertEquals(value, descriptor.getValue(key));
  // Removal must clear the value.
  descriptor.remove(key);
  assertEquals(null, descriptor.getValue(key));
}

InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/** Adding the same column family twice must throw IllegalArgumentException. */
@Test(expected = IllegalArgumentException.class)
public void testAddDuplicateFamilies() {
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf("table"));
  byte[] familyName = Bytes.toBytes("cf");
  HColumnDescriptor family = new HColumnDescriptor(familyName);
  family.setBlocksize(1000);
  descriptor.addFamily(family);
  assertEquals(1000, descriptor.getFamily(familyName).getBlocksize());
  // A second addFamily for the same family name must be rejected.
  family = new HColumnDescriptor(familyName);
  family.setBlocksize(2000);
  descriptor.addFamily(family);
}

InternalCallVerifier EqualityVerifier 
/** modifyFamily must replace the stored column family settings in place. */
@Test
public void testModifyFamily() {
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf("table"));
  byte[] familyName = Bytes.toBytes("cf");
  HColumnDescriptor family = new HColumnDescriptor(familyName);
  family.setBlocksize(1000);
  family.setDFSReplication((short) 3);
  descriptor.addFamily(family);
  assertEquals(1000, descriptor.getFamily(familyName).getBlocksize());
  assertEquals(3, descriptor.getFamily(familyName).getDFSReplication());
  // Modify with new settings and confirm they replaced the old ones.
  family = new HColumnDescriptor(familyName);
  family.setBlocksize(2000);
  family.setDFSReplication((short) 1);
  descriptor.modifyFamily(family);
  assertEquals(2000, descriptor.getFamily(familyName).getBlocksize());
  assertEquals(1, descriptor.getFamily(familyName).getDFSReplication());
}

InternalCallVerifier BooleanVerifier 
/** A single coprocessor can be added, detected, and removed again. */
@Test
public void testGetSetRemoveCP() throws Exception {
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf("table"));
  String className = "org.apache.hadoop.hbase.coprocessor.BaseRegionObserver";
  descriptor.addCoprocessor(className);
  assertTrue(descriptor.hasCoprocessor(className));
  descriptor.removeCoprocessor(className);
  assertFalse(descriptor.hasCoprocessor(className));
}

InternalCallVerifier EqualityVerifier 
/** memStoreFlushSize defaults to -1 and reflects an explicit setter call. */
@Test
public void testGetMemStoreFlushSize() {
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf("table"));
  assertEquals(-1, descriptor.getMemStoreFlushSize());
  descriptor.setMemStoreFlushSize(1111L);
  assertEquals(1111L, descriptor.getMemStoreFlushSize());
}

InternalCallVerifier EqualityVerifier 
/** maxFileSize defaults to -1 and reflects an explicit setter call. */
@Test
public void testGetMaxFileSize() {
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf("table"));
  assertEquals(-1, descriptor.getMaxFileSize());
  descriptor.setMaxFileSize(1111L);
  assertEquals(1111L, descriptor.getMaxFileSize());
}

Class: org.apache.hadoop.hbase.TestInterfaceAudienceAnnotations

InternalCallVerifier EqualityVerifier 
/**
 * Checks whether all the classes in client and common modules contain
 * {@link InterfaceAudience} annotations.
 */
@Test
public void testInterfaceAudienceAnnotation()
    throws ClassNotFoundException, IOException, LinkageError {
  // Find public, non-test, non-generated classes that are missing the annotation.
  ClassFinder classFinder = new ClassFinder(
      new And(new MainCodeResourcePathFilter(), new TestFileNameFilter()),
      new Not((FileNameFilter) new TestFileNameFilter()),
      new And(new PublicClassFilter(),
          new Not(new TestClassFilter()),
          new Not(new GeneratedClassFilter()),
          new Not(new IsInterfaceStabilityClassFilter()),
          new Not(new InterfaceAudienceAnnotatedClassFilter()),
          new Not(new CloverInstrumentationFilter())));
  Set<Class<?>> classes = classFinder.findClasses(false);
  LOG.info("These are the classes that DO NOT have @InterfaceAudience annotation:");
  for (Class<?> clazz : classes) {
    LOG.info(clazz);
  }
  // Any hit here is a missing annotation.
  Assert.assertEquals("All classes should have @InterfaceAudience annotation",
      0, classes.size());
}

InternalCallVerifier EqualityVerifier 
/**
 * Checks whether all the classes in client and common modules that are marked
 * InterfaceAudience.Public also have {@link InterfaceStability} annotations.
 */
@Test
public void testInterfaceStabilityAnnotation()
    throws ClassNotFoundException, IOException, LinkageError {
  // Find @InterfaceAudience.Public classes lacking @InterfaceStability.
  ClassFinder classFinder = new ClassFinder(
      new And(new MainCodeResourcePathFilter(), new TestFileNameFilter()),
      new Not((FileNameFilter) new TestFileNameFilter()),
      new And(new PublicClassFilter(),
          new Not(new TestClassFilter()),
          new Not(new GeneratedClassFilter()),
          new InterfaceAudiencePublicAnnotatedClassFilter(),
          new Not(new IsInterfaceStabilityClassFilter()),
          new Not(new InterfaceStabilityAnnotatedClassFilter())));
  Set<Class<?>> classes = classFinder.findClasses(false);
  LOG.info("These are the classes that DO NOT have @InterfaceStability annotation:");
  for (Class<?> clazz : classes) {
    LOG.info(clazz);
  }
  Assert.assertEquals("All classes that are marked with @InterfaceAudience.Public should "
      + "have @InterfaceStability annotation as well", 0, classes.size());
}

Class: org.apache.hadoop.hbase.TestLocalHBaseCluster

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Check that we can start a local HBase cluster specifying a custom master
 * and regionserver class and then cast back to those classes; also that
 * the cluster will launch and terminate cleanly. See HBASE-6011. Uses the
 * HBaseTestingUtility facilities for creating a LocalHBaseCluster with
 * custom master and regionserver classes.
 */
@Test
public void testLocalHBaseCluster() throws Exception {
  TEST_UTIL.startMiniCluster(1, 1, null, MyHMaster.class, MyHRegionServer.class);
  // The master must actually be a MyHMaster instance.
  try {
    int echoed = ((MyHMaster) TEST_UTIL.getHBaseCluster().getMaster(0)).echo(42);
    assertEquals(42, echoed);
  } catch (ClassCastException e) {
    fail("Could not cast master to our class");
  }
  // Likewise for the region server.
  try {
    int echoed = ((MyHRegionServer) TEST_UTIL.getHBaseCluster().getRegionServer(0)).echo(42);
    assertEquals(42, echoed);
  } catch (ClassCastException e) {
    fail("Could not cast regionserver to our class");
  }
  TEST_UTIL.shutdownMiniCluster();
}

Class: org.apache.hadoop.hbase.TestMetaTableAccessor

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests whether the maximum of the master's system time versus the RS's local
 * system time is used as the timestamp when a region location is updated.
 */
@Test
public void testMastersSystemTimeIsUsedInUpdateLocations() throws IOException {
  long regionId = System.currentTimeMillis();
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf("table_foo"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, 0);
  ServerName sn = ServerName.valueOf("bar", 0, 0);
  // try-with-resources replaces the manual try/finally close of the meta table.
  try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
    List<HRegionInfo> regionInfos = Lists.newArrayList(regionInfo);
    MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1);
    // A master time far ahead of the local clock; it must end up on the cells.
    long masterSystemTime = EnvironmentEdgeManager.currentTime() + 123456789;
    MetaTableAccessor.updateRegionLocation(connection, regionInfo, sn, 1, masterSystemTime);
    Get get = new Get(regionInfo.getRegionName());
    Result result = meta.get(get);
    Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getServerColumn(0));
    Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getStartCodeColumn(0));
    Cell seqNumCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        MetaTableAccessor.getSeqNumColumn(0));
    // All three location cells must exist and be non-empty.
    assertNotNull(serverCell);
    assertNotNull(startCodeCell);
    assertNotNull(seqNumCell);
    assertTrue(serverCell.getValueLength() > 0);
    assertTrue(startCodeCell.getValueLength() > 0);
    assertTrue(seqNumCell.getValueLength() > 0);
    // And they must all carry the master-provided timestamp.
    assertEquals(masterSystemTime, serverCell.getTimestamp());
    assertEquals(masterSystemTime, startCodeCell.getTimestamp());
    assertEquals(masterSystemTime, seqNumCell.getTimestamp());
  }
}

InternalCallVerifier BooleanVerifier 
/** Meta must be hosted: at least one meta region and one location are reported. */
@Test
public void testGetRegionsFromMetaTable() throws IOException, InterruptedException {
  List metaRegions = new MetaTableLocator().getMetaRegions(UTIL.getZooKeeperWatcher());
  assertTrue(metaRegions.size() >= 1);
  assertTrue(new MetaTableLocator()
      .getMetaRegionsAndLocations(UTIL.getZooKeeperWatcher()).size() >= 1);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** * Does {@link MetaTableAccessor#getRegion(Connection,byte[])} and a write * against hbase:meta while its hosted server is restarted to prove our retrying * works. * @throws IOException * @throws InterruptedException */ @Test public void testRetrying() throws IOException, InterruptedException { final TableName name=TableName.valueOf("testRetrying"); LOG.info("Started " + name); Table t=UTIL.createMultiRegionTable(name,HConstants.CATALOG_FAMILY); int regionCount=-1; try (RegionLocator r=UTIL.getConnection().getRegionLocator(name)){ regionCount=r.getStartKeys().length; } final List regions=testGettingTableRegions(connection,name,regionCount); MetaTask reader=new MetaTask(connection,"reader"){ @Override void metaTask() throws Throwable { testGetRegion(connection,regions.get(0)); LOG.info("Read " + regions.get(0).getEncodedName()); } } ; MetaTask writer=new MetaTask(connection,"writer"){ @Override void metaTask() throws Throwable { MetaTableAccessor.addRegionToMeta(connection,regions.get(0)); LOG.info("Wrote " + regions.get(0).getEncodedName()); } } ; reader.start(); writer.start(); final long timeOut=180000; long startTime=System.currentTimeMillis(); try { assertTrue(reader.isProgressing()); assertTrue(writer.isProgressing()); for (int i=0; i < 2; i++) { LOG.info("Restart=" + i); UTIL.ensureSomeRegionServersAvailable(2); int index=-1; do { index=UTIL.getMiniHBaseCluster().getServerWithMeta(); } while (index == -1 && startTime + timeOut < System.currentTimeMillis()); if (index != -1) { UTIL.getMiniHBaseCluster().abortRegionServer(index); UTIL.getMiniHBaseCluster().waitOnRegionServer(index); } } assertTrue("reader: " + reader.toString(),reader.isProgressing()); assertTrue("writer: " + writer.toString(),writer.isProgressing()); } catch ( IOException e) { throw e; } finally { reader.stop=true; writer.stop=true; reader.join(); writer.join(); t.close(); } long exeTime=System.currentTimeMillis() - startTime; assertTrue("Timeout: test took " + exeTime / 1000 + " 
sec",exeTime < timeOut); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the master-supplied system time is honored when two regions are
 * merged in hbase:meta: the merge must supersede location cells written earlier
 * with an even larger (RS-supplied) timestamp.
 */
@Test public void testMastersSystemTimeIsUsedInMergeRegions() throws IOException {
  long regionId=System.currentTimeMillis();
  // Two adjacent regions of table_foo split at row 'a', plus the whole-range merged
  // region. Note regionInfoA and mergedRegionInfo share start row and regionId, so
  // they map to the same meta row.
  HRegionInfo regionInfoA=new HRegionInfo(TableName.valueOf("table_foo"),HConstants.EMPTY_START_ROW,new byte[]{'a'},false,regionId,0);
  HRegionInfo regionInfoB=new HRegionInfo(TableName.valueOf("table_foo"),new byte[]{'a'},HConstants.EMPTY_END_ROW,false,regionId,0);
  HRegionInfo mergedRegionInfo=new HRegionInfo(TableName.valueOf("table_foo"),HConstants.EMPTY_START_ROW,HConstants.EMPTY_END_ROW,false,regionId,0);
  ServerName sn=ServerName.valueOf("bar",0,0);
  Table meta=MetaTableAccessor.getMetaHTable(connection);
  try {
    List regionInfos=Lists.newArrayList(regionInfoA,regionInfoB);
    MetaTableAccessor.addRegionsToMeta(connection,regionInfos,1);
    // Both timestamps lie in the future; the RS one is older than the master's.
    long serverNameTime=EnvironmentEdgeManager.currentTime() + 100000000;
    long masterSystemTime=EnvironmentEdgeManager.currentTime() + 123456789;
    // Seed region A's location with the RS timestamp and verify it stuck.
    MetaTableAccessor.updateRegionLocation(connection,regionInfoA,sn,1,serverNameTime);
    Get get=new Get(mergedRegionInfo.getRegionName());
    Result result=meta.get(get);
    Cell serverCell=result.getColumnLatestCell(HConstants.CATALOG_FAMILY,MetaTableAccessor.getServerColumn(0));
    assertNotNull(serverCell);
    assertEquals(serverNameTime,serverCell.getTimestamp());
    // Merge using the master time and re-read the same row.
    MetaTableAccessor.mergeRegions(connection,mergedRegionInfo,regionInfoA,regionInfoB,sn,1,masterSystemTime);
    result=meta.get(get);
    serverCell=result.getColumnLatestCell(HConstants.CATALOG_FAMILY,MetaTableAccessor.getServerColumn(0));
    Cell startCodeCell=result.getColumnLatestCell(HConstants.CATALOG_FAMILY,MetaTableAccessor.getStartCodeColumn(0));
    Cell seqNumCell=result.getColumnLatestCell(HConstants.CATALOG_FAMILY,MetaTableAccessor.getSeqNumColumn(0));
    // After the merge the old location cells must be gone, even though the earlier
    // put carried a timestamp close to the merge's.
    assertNull(serverCell);
    assertNull(startCodeCell);
    assertNull(seqNumCell);
  } finally {
    meta.close();
  }
}

Class: org.apache.hadoop.hbase.TestMetaTableAccessorNoCluster

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that MetaTableAccessor will ride over server throwing
 * "Server not running" IOEs.
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-3446">HBASE-3446</a>
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testRideOverServerNotRunning()
    throws IOException, InterruptedException, ServiceException {
  // A zk watcher is needed before scanning meta.
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(),
      this.getClass().getSimpleName(), ABORTABLE, true);
  ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
  ClusterConnection connection = null;
  try {
    final ClientProtos.ClientService.BlockingInterface implementation =
        Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
    // One fabricated meta row carrying region info, server, and startcode columns.
    List<Cell> kvs = new ArrayList<Cell>();
    final byte[] rowToVerify = Bytes.toBytes("rowToVerify");
    kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER, HRegionInfo.FIRST_META_REGIONINFO.toByteArray()));
    kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY,
        HConstants.SERVER_QUALIFIER, Bytes.toBytes(sn.getHostAndPort())));
    kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY,
        HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode())));
    final List<CellScannable> cellScannables = new ArrayList<CellScannable>(1);
    cellScannables.add(Result.create(kvs));
    final ScanResponse.Builder builder = ScanResponse.newBuilder();
    for (CellScannable result : cellScannables) {
      builder.addCellsPerResult(((Result) result).size());
    }
    // Fail the scan three times with "Server not running", then succeed: open a
    // scanner, deliver the row (cells travel in the rpc controller), then signal
    // no more results.
    Mockito.when(implementation.scan(
        (RpcController) Mockito.any(), (ScanRequest) Mockito.any()))
        .thenThrow(new ServiceException("Server not running (1 of 3)"))
        .thenThrow(new ServiceException("Server not running (2 of 3)"))
        .thenThrow(new ServiceException("Server not running (3 of 3)"))
        .thenReturn(ScanResponse.newBuilder().setScannerId(1234567890L).build())
        .thenAnswer(new Answer<ScanResponse>() {
          public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
            ((PayloadCarryingRpcController) invocation.getArguments()[0])
                .setCellScanner(CellUtil.createCellScanner(cellScannables));
            return builder.build();
          }
        })
        .thenReturn(ScanResponse.newBuilder().setMoreResults(false).build());
    // Wire the mocked client into a spied connection that always locates meta at sn.
    connection = HConnectionTestingUtility.getSpiedConnection(UTIL.getConfiguration());
    final HRegionLocation anyLocation =
        new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn);
    final RegionLocations rl = new RegionLocations(anyLocation);
    Mockito.doReturn(rl).when(connection).locateRegion((TableName) Mockito.any(),
        (byte[]) Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt());
    Mockito.doReturn(implementation).when(connection).getClient(Mockito.any(ServerName.class));
    // The accessor must retry through the failures and return the single region.
    NavigableMap<HRegionInfo, Result> hris =
        MetaTableAccessor.getServerUserRegions(connection, sn);
    assertEquals(1, hris.size());
    assertTrue(hris.firstEntry().getKey().equals(HRegionInfo.FIRST_META_REGIONINFO));
    assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow()));
    // Six scan invocations in total, matching the six stubbed responses above.
    Mockito.verify(implementation, Mockito.times(6))
        .scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any());
  } finally {
    if (connection != null && !connection.isClosed()) connection.close();
    zkw.close();
  }
}

Class: org.apache.hadoop.hbase.TestMetaTableLocator

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * The blocking wait on meta must be releasable: a thread parked in
 * waitMetaRegionLocation is freed when the locator is stopped.
 */
@Test
public void testInterruptWaitOnMeta()
    throws IOException, InterruptedException, ServiceException {
  final ClientProtos.ClientService.BlockingInterface client =
      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
  Mockito.when(client.get((RpcController) Mockito.any(), (GetRequest) Mockito.any()))
      .thenReturn(GetResponse.newBuilder().build());
  final MetaTableLocator mtl = new MetaTableLocator();
  // No meta location has been published yet.
  ServerName meta = new MetaTableLocator().getMetaRegionLocation(this.watcher);
  assertNull(meta);
  // Park a second thread on the blocking wait.
  Thread waiter = new Thread() {
    @Override
    public void run() {
      try {
        mtl.waitMetaRegionLocation(watcher);
      } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted", e);
      }
    }
  };
  waiter.start();
  // Give the thread a moment to start and actually block.
  while (!waiter.isAlive()) {
    Threads.sleep(1);
  }
  Threads.sleep(1);
  assertTrue(waiter.isAlive());
  // stop() must release the waiting thread so join() returns.
  mtl.stop();
  waiter.join();
}

InternalCallVerifier BooleanVerifier 
/**
 * Verification of the meta region location must fail cleanly (return false)
 * when nothing answers at the recorded server address.
 */
@Test
public void testVerifyMetaRegionLocationFails()
    throws IOException, InterruptedException, KeeperException, ServiceException {
  ClusterConnection connection = Mockito.mock(ClusterConnection.class);
  ServiceException connectException =
      new ServiceException(new ConnectException("Connection refused"));
  final AdminProtos.AdminService.BlockingInterface implementation =
      Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
  // Every getRegionInfo call refuses the connection.
  Mockito.when(implementation.getRegionInfo((RpcController) Mockito.any(),
      (GetRegionInfoRequest) Mockito.any())).thenThrow(connectException);
  Mockito.when(connection.getAdmin(Mockito.any(ServerName.class)))
      .thenReturn(implementation);
  ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
  // Whether meta is marked OPENING or OPEN, verification must return false.
  MetaTableLocator.setMetaLocation(this.watcher, sn, RegionState.State.OPENING);
  assertFalse(new MetaTableLocator().verifyMetaRegionLocation(connection, watcher, 100));
  MetaTableLocator.setMetaLocation(this.watcher, sn, RegionState.State.OPEN);
  assertFalse(new MetaTableLocator().verifyMetaRegionLocation(connection, watcher, 100));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Normal operations: the meta location is only visible in the OPEN state, and
 * deleting the location resets the recorded state to OFFLINE.
 */
@Test
public void testMetaLookup()
    throws IOException, InterruptedException, ServiceException, KeeperException {
  final ClientProtos.ClientService.BlockingInterface client =
      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
  Mockito.when(client.get((RpcController) Mockito.any(), (GetRequest) Mockito.any()))
      .thenReturn(GetResponse.newBuilder().build());
  final MetaTableLocator mtl = new MetaTableLocator();
  assertNull(mtl.getMetaRegionLocation(this.watcher));
  // In every non-OPEN state the location stays hidden while the state is readable.
  for (RegionState.State state : RegionState.State.values()) {
    if (state.equals(RegionState.State.OPEN)) {
      continue;
    }
    MetaTableLocator.setMetaLocation(this.watcher, SN, state);
    assertNull(mtl.getMetaRegionLocation(this.watcher));
    assertEquals(state, MetaTableLocator.getMetaRegionState(this.watcher).getState());
  }
  // Once OPEN, the location becomes visible.
  MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPEN);
  assertEquals(mtl.getMetaRegionLocation(this.watcher), SN);
  assertEquals(RegionState.State.OPEN,
      MetaTableLocator.getMetaRegionState(this.watcher).getState());
  // Deleting the location clears the server name and leaves the state OFFLINE.
  mtl.deleteMetaLocation(this.watcher);
  assertNull(MetaTableLocator.getMetaRegionState(this.watcher).getServerName());
  assertEquals(MetaTableLocator.getMetaRegionState(this.watcher).getState(),
      RegionState.State.OFFLINE);
  assertNull(mtl.getMetaRegionLocation(this.watcher));
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Test waiting on meta w/ no timeout specified: the waiting thread returns
 * once the meta location is published as OPEN.
 */
@Test
public void testNoTimeoutWaitForMeta()
    throws IOException, InterruptedException, KeeperException {
  final MetaTableLocator mtl = new MetaTableLocator();
  ServerName hsa = mtl.getMetaRegionLocation(watcher);
  assertNull(hsa);
  // Park a thread waiting on meta, then publish the location and let it finish.
  Thread waiter = new WaitOnMetaThread();
  startWaitAliveThenWaitItLives(waiter, 1);
  MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPEN);
  hsa = SN;
  waiter.join();
  assertTrue(mtl.getMetaRegionLocation(watcher).equals(hsa));
}

Class: org.apache.hadoop.hbase.TestMultiVersions

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Verifies versions across a cluster restart.
 * Port of old TestGetRowVersions test to here so can better utilize the spun
 * up cluster running more than a single test per spin up. Keep old tests'
 * craziness.
 */
@Test
public void testGetRowVersions() throws Exception {
  final String tableName = "testGetRowVersions";
  final byte[] contents = Bytes.toBytes("contents");
  final byte[] row = Bytes.toBytes("row");
  final byte[] value1 = Bytes.toBytes("value1");
  final byte[] value2 = Bytes.toBytes("value2");
  final long timestamp1 = 100L;
  final long timestamp2 = 200L;
  final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor hcd = new HColumnDescriptor(contents);
  hcd.setMaxVersions(3);
  desc.addFamily(hcd);
  this.admin.createTable(desc);

  // First version, written before the restart.
  Put put = new Put(row, timestamp1);
  put.addColumn(contents, contents, value1);
  Table table = UTIL.getConnection().getTable(desc.getTableName());
  table.put(put);
  table.close();

  // Bounce the cluster to prove versions survive a restart.
  UTIL.shutdownMiniHBaseCluster();
  LOG.debug("HBase cluster shut down -- restarting");
  UTIL.startMiniHBaseCluster(1, NUM_SLAVES);

  // Second version, written after the restart.
  table = UTIL.getConnection().getTable(desc.getTableName());
  put = new Put(row, timestamp2);
  put.addColumn(contents, contents, value2);
  table.put(put);

  // A plain get returns only the newest version.
  Get get = new Get(row);
  Result r = table.get(get);
  assertNotNull(r);
  assertFalse(r.isEmpty());
  assertTrue(r.size() == 1);
  byte[] value = r.getValue(contents, contents);
  assertTrue(value.length != 0);
  assertTrue(Bytes.equals(value, value2));

  // With max versions requested, both versions come back.
  get = new Get(row);
  get.setMaxVersions();
  r = table.get(get);
  assertTrue(r.size() == 2);
  value = r.getValue(contents, contents);
  assertTrue(value.length != 0);
  assertTrue(Bytes.equals(value, value2));

  // Walk family -> qualifier -> version map and check both timestamped values.
  NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = r.getMap();
  NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap = map.get(contents);
  NavigableMap<Long, byte[]> versionMap = familyMap.get(contents);
  assertTrue(versionMap.size() == 2);
  assertTrue(Bytes.equals(value1, versionMap.get(timestamp1)));
  assertTrue(Bytes.equals(value2, versionMap.get(timestamp2)));
  table.close();
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Port of old TestScanMultipleVersions test here so can better utilize the * spun up cluster running more than just a single test. Keep old tests * crazyness. *

Tests five cases of scans and timestamps. * @throws Exception */ @Test public void testScanMultipleVersions() throws Exception { final TableName tableName=TableName.valueOf("testScanMultipleVersions"); final HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); final byte[][] rows=new byte[][]{Bytes.toBytes("row_0200"),Bytes.toBytes("row_0800")}; final byte[][] splitRows=new byte[][]{Bytes.toBytes("row_0500")}; final long[] timestamp=new long[]{100L,1000L}; this.admin.createTable(desc,splitRows); Table table=UTIL.getConnection().getTable(tableName); Pair keys=UTIL.getConnection().getRegionLocator(tableName).getStartEndKeys(); assertEquals(2,keys.getFirst().length); byte[][] startKeys=keys.getFirst(); byte[][] endKeys=keys.getSecond(); for (int i=0; i < startKeys.length; i++) { if (i == 0) { assertTrue(Bytes.equals(HConstants.EMPTY_START_ROW,startKeys[i])); assertTrue(Bytes.equals(endKeys[i],splitRows[0])); } else if (i == 1) { assertTrue(Bytes.equals(splitRows[0],startKeys[i])); assertTrue(Bytes.equals(endKeys[i],HConstants.EMPTY_END_ROW)); } } List puts=new ArrayList<>(); for (int i=0; i < startKeys.length; i++) { for (int j=0; j < timestamp.length; j++) { Put put=new Put(rows[i],timestamp[j]); put.addColumn(HConstants.CATALOG_FAMILY,null,timestamp[j],Bytes.toBytes(timestamp[j])); puts.add(put); } } table.put(puts); for (int i=0; i < rows.length; i++) { for (int j=0; j < timestamp.length; j++) { Get get=new Get(rows[i]); get.addFamily(HConstants.CATALOG_FAMILY); get.setTimeStamp(timestamp[j]); Result result=table.get(get); int cellCount=0; for ( @SuppressWarnings("unused") Cell kv : result.listCells()) { cellCount++; } assertTrue(cellCount == 1); } } int count=0; Scan scan=new Scan(); scan.addFamily(HConstants.CATALOG_FAMILY); ResultScanner s=table.getScanner(scan); try { for (Result rr=null; (rr=s.next()) != null; ) { System.out.println(rr.toString()); count+=1; } assertEquals("Number of rows 
should be 2",2,count); } finally { s.close(); } count=0; scan=new Scan(); scan.setTimeRange(1000L,Long.MAX_VALUE); scan.addFamily(HConstants.CATALOG_FAMILY); s=table.getScanner(scan); try { while (s.next() != null) { count+=1; } assertEquals("Number of rows should be 2",2,count); } finally { s.close(); } count=0; scan=new Scan(); scan.setTimeStamp(1000L); scan.addFamily(HConstants.CATALOG_FAMILY); s=table.getScanner(scan); try { while (s.next() != null) { count+=1; } assertEquals("Number of rows should be 2",2,count); } finally { s.close(); } count=0; scan=new Scan(); scan.setTimeRange(100L,1000L); scan.addFamily(HConstants.CATALOG_FAMILY); s=table.getScanner(scan); try { while (s.next() != null) { count+=1; } assertEquals("Number of rows should be 2",2,count); } finally { s.close(); } count=0; scan=new Scan(); scan.setTimeStamp(100L); scan.addFamily(HConstants.CATALOG_FAMILY); s=table.getScanner(scan); try { while (s.next() != null) { count+=1; } assertEquals("Number of rows should be 2",2,count); } finally { s.close(); } }


Class: org.apache.hadoop.hbase.TestNamespace

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the namespace admin operations (create/get/modify/delete and the
 * per-namespace table listings) together with their expected failure modes.
 */
@Test(timeout = 60000)
public void testNamespaceOperations() throws IOException {
  admin.createNamespace(NamespaceDescriptor.create(prefix + "ns1").build());
  admin.createNamespace(NamespaceDescriptor.create(prefix + "ns2").build());
  // FIX: restored the generic parameters that were stripped from the anonymous
  // Callables (raw Callable does not match runWithExpectedException's signature).
  // Creating an already-existing namespace must fail.
  runWithExpectedException(new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      admin.createNamespace(NamespaceDescriptor.create(prefix + "ns1").build());
      return null;
    }
  }, NamespaceExistException.class);
  // Creating a table inside a missing namespace must fail.
  runWithExpectedException(new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      HTableDescriptor htd =
          new HTableDescriptor(TableName.valueOf("non_existing_namespace", "table1"));
      htd.addFamily(new HColumnDescriptor("family1"));
      admin.createTable(htd);
      return null;
    }
  }, NamespaceNotFoundException.class);
  admin.getNamespaceDescriptor(prefix + "ns1");
  runWithExpectedException(new Callable<NamespaceDescriptor>() {
    @Override
    public NamespaceDescriptor call() throws Exception {
      return admin.getNamespaceDescriptor("non_existing_namespace");
    }
  }, NamespaceNotFoundException.class);
  admin.deleteNamespace(prefix + "ns2");
  runWithExpectedException(new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      admin.deleteNamespace("non_existing_namespace");
      return null;
    }
  }, NamespaceNotFoundException.class);
  // Modifying an existing namespace succeeds; a missing one fails.
  NamespaceDescriptor ns1 = admin.getNamespaceDescriptor(prefix + "ns1");
  ns1.setConfiguration("foo", "bar");
  admin.modifyNamespace(ns1);
  runWithExpectedException(new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      admin.modifyNamespace(NamespaceDescriptor.create("non_existing_namespace").build());
      return null;
    }
  }, NamespaceNotFoundException.class);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(prefix + "ns1", "table1"));
  htd.addFamily(new HColumnDescriptor("family1"));
  admin.createTable(htd);
  HTableDescriptor[] htds = admin.listTableDescriptorsByNamespace(prefix + "ns1");
  assertNotNull("Should have not returned null", htds);
  assertEquals("Should have returned non-empty array", 1, htds.length);
  runWithExpectedException(new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      admin.listTableDescriptorsByNamespace("non_existant_namespace");
      return null;
    }
  }, NamespaceNotFoundException.class);
  TableName[] tableNames = admin.listTableNamesByNamespace(prefix + "ns1");
  assertNotNull("Should have not returned null", tableNames);
  assertEquals("Should have returned non-empty array", 1, tableNames.length);
  runWithExpectedException(new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      admin.listTableNamesByNamespace("non_existing_namespace");
      return null;
    }
  }, NamespaceNotFoundException.class);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A table created inside the reserved "hbase" system namespace must exist
 * (tableExists) but stay hidden from the user-facing listTables().
 */
@Test
public void createTableInSystemNamespace() throws Exception {
  final TableName tableName = TableName.valueOf("hbase:createTableInSystemNamespace");
  final HTableDescriptor descriptor = new HTableDescriptor(tableName);
  descriptor.addFamily(new HColumnDescriptor("cf1"));
  admin.createTable(descriptor);
  // System-namespace tables are excluded from the user table listing.
  assertEquals(0, admin.listTables().length);
  assertTrue(admin.tableExists(tableName));
  // Clean up.
  admin.disableTable(descriptor.getTableName());
  admin.deleteTable(descriptor.getTableName());
}

InternalCallVerifier BooleanVerifier 
/**
 * A table created without a namespace qualifier lands in the default namespace
 * and shows up in listTables().
 */
@Test
public void createTableInDefaultNamespace() throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("default_table"));
  HColumnDescriptor colDesc = new HColumnDescriptor("cf1");
  desc.addFamily(colDesc);
  admin.createTable(desc);
  // FIX: assertEquals reports expected-vs-actual on failure where the original
  // assertTrue(length == 1) only reported a bare boolean failure.
  assertEquals(1, admin.listTables().length);
  // Clean up.
  admin.disableTable(desc.getTableName());
  admin.deleteTable(desc.getTableName());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Two tables with the same qualifier may coexist: one in the default namespace
 * and one in a user namespace; dropping one must not affect the other.
 */
@Test
public void createDoubleTest() throws IOException, InterruptedException {
  String testName = "createDoubleTest";
  String nsName = prefix + "_" + testName;
  LOG.info(testName);
  TableName tableName = TableName.valueOf("my_table");
  TableName tableNameFoo = TableName.valueOf(nsName + ":my_table");
  // Create the same table qualifier in the default namespace and in nsName.
  admin.createNamespace(NamespaceDescriptor.create(nsName).build());
  TEST_UTIL.createTable(tableName, Bytes.toBytes(nsName));
  TEST_UTIL.createTable(tableNameFoo, Bytes.toBytes(nsName));
  assertEquals(2, admin.listTables().length);
  assertNotNull(admin.getTableDescriptor(tableName));
  assertNotNull(admin.getTableDescriptor(tableNameFoo));
  // Dropping the default-namespace table leaves the namespaced twin intact.
  admin.disableTable(tableName);
  admin.deleteTable(tableName);
  assertEquals(1, admin.listTables().length);
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Table creation inside a user namespace: fails while the namespace is missing,
 * succeeds once it exists; a non-empty namespace cannot be deleted; the table
 * directory is laid out under the namespace dir and the table is usable.
 */
@Test
public void createTableTest() throws IOException, InterruptedException {
  String testName = "createTableTest";
  String nsName = prefix + "_" + testName;
  LOG.info(testName);
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(nsName + ":my_table"));
  desc.addFamily(new HColumnDescriptor("my_cf"));
  try {
    admin.createTable(desc);
    fail("Expected no namespace exists exception");
  } catch (NamespaceNotFoundException ex) {
    // expected: the namespace has not been created yet
  }
  admin.createNamespace(NamespaceDescriptor.create(nsName).build());
  admin.createTable(desc);
  TEST_UTIL.waitTableAvailable(desc.getTableName().getName(), 10000);
  // The table directory must live under <root>/data/<ns>/<qualifier>.
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  Path tableDir = new Path(master.getMasterFileSystem().getRootDir(),
      new Path(HConstants.BASE_NAMESPACE_DIR,
          new Path(nsName, desc.getTableName().getQualifierAsString())));
  assertTrue(fs.exists(tableDir));
  assertEquals(1, admin.listTables().length);
  // A namespace that still contains tables must refuse deletion.
  try {
    admin.deleteNamespace(nsName);
    fail("Expected non-empty namespace constraint exception");
  } catch (Exception ex) {
    LOG.info("Caught expected exception: " + ex);
  }
  // The new table is writable and readable.
  Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
  Put p = new Put(Bytes.toBytes("row1"));
  p.addColumn(Bytes.toBytes("my_cf"), Bytes.toBytes("my_col"), Bytes.toBytes("value1"));
  table.put(p);
  admin.flush(desc.getTableName());
  Get g = new Get(Bytes.toBytes("row1"));
  assertTrue(table.exists(g));
  // Clean up: once the table is gone the namespace can be removed.
  TEST_UTIL.deleteTable(desc.getTableName());
  admin.deleteNamespace(nsName);
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Creating then removing a namespace must be reflected both in the master's
 * namespace listing and in the ZooKeeper-backed zkNamespaceManager.
 */
@Test
public void createRemoveTest() throws Exception {
  String testName = "createRemoveTest";
  String nsName = prefix + "_" + testName;
  LOG.info(testName);
  admin.createNamespace(NamespaceDescriptor.create(nsName).build());
  // The two reserved namespaces plus the one just created.
  assertEquals(3, admin.listNamespaceDescriptors().length);
  // FIX: restored the stripped generic parameter on Waiter.Predicate.
  // Wait for the new namespace to propagate to ZK.
  TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return zkNamespaceManager.list().size() == 3;
    }
  });
  assertNotNull(zkNamespaceManager.get(nsName));
  admin.deleteNamespace(nsName);
  assertEquals(2, admin.listNamespaceDescriptors().length);
  assertEquals(2, zkNamespaceManager.list().size());
  assertNull(zkNamespaceManager.get(nsName));
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The reserved 'default' and 'hbase' namespaces must pre-exist (in the master
 * and in ZK), contain exactly the system tables, and reject re-creation.
 */
@Test
public void verifyReservedNS() throws IOException {
  NamespaceDescriptor ns =
      admin.getNamespaceDescriptor(NamespaceDescriptor.DEFAULT_NAMESPACE.getName());
  assertNotNull(ns);
  assertEquals(ns.getName(), NamespaceDescriptor.DEFAULT_NAMESPACE.getName());
  assertNotNull(zkNamespaceManager.get(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR));
  ns = admin.getNamespaceDescriptor(NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
  assertNotNull(ns);
  assertEquals(ns.getName(), NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
  assertNotNull(zkNamespaceManager.get(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR));
  assertEquals(2, admin.listNamespaceDescriptors().length);
  // FIX: restored the stripped generic parameter (raw Set).
  Set<TableName> systemTables =
      Sets.newHashSet(TableName.META_TABLE_NAME, TableName.NAMESPACE_TABLE_NAME);
  HTableDescriptor[] descs =
      admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
  assertEquals(systemTables.size(), descs.length);
  for (HTableDescriptor desc : descs) {
    assertTrue(systemTables.contains(desc.getTableName()));
  }
  // System tables must not leak into the user table listing.
  assertEquals(0, admin.listTables().length);
  // Re-creating either reserved namespace must fail.
  boolean exceptionCaught = false;
  try {
    admin.createNamespace(NamespaceDescriptor.DEFAULT_NAMESPACE);
  } catch (IOException exp) {
    LOG.warn(exp);
    exceptionCaught = true;
  } finally {
    assertTrue(exceptionCaught);
  }
  exceptionCaught = false;
  try {
    admin.createNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE);
  } catch (IOException exp) {
    LOG.warn(exp);
    exceptionCaught = true;
  } finally {
    assertTrue(exceptionCaught);
  }
}

Class: org.apache.hadoop.hbase.TestNodeHealthCheckChore

InternalCallVerifier BooleanVerifier 
/**
 * With a health script that always reports ERROR, the region-server health
 * chore must leave the Stoppable running after two runs and stop it on the
 * third.
 */
@Test(timeout = 60000)
public void testRSHealthChore() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = getConfForNodeHealthScript();
  String errorScript = "echo ERROR" + eol + " echo \"Server not healthy\"";
  createScript(errorScript, true);
  HealthCheckChore rsChore = new HealthCheckChore(100, stop, conf);
  try {
    // Two failing runs: nothing should be stopped yet.
    rsChore.chore();
    rsChore.chore();
    assertFalse("Stoppable must not be stopped.", stop.isStopped());
    // The third failing run is expected to trip the stop.
    rsChore.chore();
    assertTrue("Stoppable must have been stopped.", stop.isStopped());
  } finally {
    stop.stop("Finished w/ test");
  }
}

Class: org.apache.hadoop.hbase.TestOffheapKeyValue

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips KeyValues through direct (off-heap) ByteBuffers and checks that
 * the ByteBuffer-backed accessors and the classic array-backed accessors agree
 * on every cell component (row, family, qualifier, value, timestamp, type).
 */
@Test
public void testByteBufferBackedKeyValue() throws Exception {
  // FIX: replaced the lowercase long suffix '0l' (easily misread as '01') with '0L'
  // throughout.
  KeyValue kvCell = new KeyValue(row1, fam1, qual1, 0L, Type.Put, row1);
  ByteBuffer buf = ByteBuffer.allocateDirect(kvCell.getBuffer().length);
  ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), 0, kvCell.getBuffer().length);
  ByteBufferedCell offheapKV = new OffheapKeyValue(buf, 0, buf.capacity(), false, 0L);
  // ByteBuffer-backed accessors.
  assertEquals(ROW1, ByteBufferUtils.toStringBinary(offheapKV.getRowByteBuffer(),
      offheapKV.getRowPosition(), offheapKV.getRowLength()));
  assertEquals(FAM1, ByteBufferUtils.toStringBinary(offheapKV.getFamilyByteBuffer(),
      offheapKV.getFamilyPosition(), offheapKV.getFamilyLength()));
  assertEquals(QUAL1, ByteBufferUtils.toStringBinary(offheapKV.getQualifierByteBuffer(),
      offheapKV.getQualifierPosition(), offheapKV.getQualifierLength()));
  assertEquals(ROW1, ByteBufferUtils.toStringBinary(offheapKV.getValueByteBuffer(),
      offheapKV.getValuePosition(), offheapKV.getValueLength()));
  assertEquals(0L, offheapKV.getTimestamp());
  assertEquals(Type.Put.getCode(), offheapKV.getTypeByte());
  // Array-backed accessors must report the same contents.
  assertEquals(ROW1, Bytes.toStringBinary(offheapKV.getRowArray(), offheapKV.getRowOffset(),
      offheapKV.getRowLength()));
  assertEquals(FAM1, Bytes.toStringBinary(offheapKV.getFamilyArray(), offheapKV.getFamilyOffset(),
      offheapKV.getFamilyLength()));
  assertEquals(QUAL1, Bytes.toStringBinary(offheapKV.getQualifierArray(),
      offheapKV.getQualifierOffset(), offheapKV.getQualifierLength()));
  assertEquals(ROW1, Bytes.toStringBinary(offheapKV.getValueArray(), offheapKV.getValueOffset(),
      offheapKV.getValueLength()));
  assertEquals(0L, offheapKV.getTimestamp());
  assertEquals(Type.Put.getCode(), offheapKV.getTypeByte());
  // A different family/qualifier pair.
  kvCell = new KeyValue(row1, fam2, qual2, 0L, Type.Put, row1);
  buf = ByteBuffer.allocateDirect(kvCell.getBuffer().length);
  ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), 0, kvCell.getBuffer().length);
  offheapKV = new OffheapKeyValue(buf, 0, buf.capacity(), false, 0L);
  assertEquals(FAM2, ByteBufferUtils.toStringBinary(offheapKV.getFamilyByteBuffer(),
      offheapKV.getFamilyPosition(), offheapKV.getFamilyLength()));
  assertEquals(QUAL2, ByteBufferUtils.toStringBinary(offheapKV.getQualifierByteBuffer(),
      offheapKV.getQualifierPosition(), offheapKV.getQualifierLength()));
  // Empty (zero-length) qualifier.
  byte[] nullQualifier = new byte[0];
  kvCell = new KeyValue(row1, fam1, nullQualifier, 0L, Type.Put, row1);
  buf = ByteBuffer.allocateDirect(kvCell.getBuffer().length);
  ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), 0, kvCell.getBuffer().length);
  offheapKV = new OffheapKeyValue(buf, 0, buf.capacity(), false, 0L);
  assertEquals(ROW1, ByteBufferUtils.toStringBinary(offheapKV.getRowByteBuffer(),
      offheapKV.getRowPosition(), offheapKV.getRowLength()));
  assertEquals(FAM1, ByteBufferUtils.toStringBinary(offheapKV.getFamilyByteBuffer(),
      offheapKV.getFamilyPosition(), offheapKV.getFamilyLength()));
  assertEquals("", ByteBufferUtils.toStringBinary(offheapKV.getQualifierByteBuffer(),
      offheapKV.getQualifierPosition(), offheapKV.getQualifierLength()));
  assertEquals(ROW1, ByteBufferUtils.toStringBinary(offheapKV.getValueByteBuffer(),
      offheapKV.getValuePosition(), offheapKV.getValueLength()));
  assertEquals(0L, offheapKV.getTimestamp());
  assertEquals(Type.Put.getCode(), offheapKV.getTypeByte());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Checks the key-part accessors of a ByteBufferedKeyOnlyKeyValue built from
 * just the key bytes of a KeyValue.
 */
@Test
public void testGetKeyMethods() throws Exception {
  // FIX: '0l' -> '0L' (the lowercase long suffix is easily misread as '01').
  KeyValue kvCell = new KeyValue(row1, fam1, qual1, 0L, Type.Put, row1, tags);
  ByteBuffer buf = ByteBuffer.allocateDirect(kvCell.getKeyLength());
  ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), kvCell.getKeyOffset(),
      kvCell.getKeyLength());
  ByteBufferedCell offheapKeyOnlyKV = new ByteBufferedKeyOnlyKeyValue(buf, 0, buf.capacity());
  assertEquals(ROW1, ByteBufferUtils.toStringBinary(offheapKeyOnlyKV.getRowByteBuffer(),
      offheapKeyOnlyKV.getRowPosition(), offheapKeyOnlyKV.getRowLength()));
  assertEquals(FAM1, ByteBufferUtils.toStringBinary(offheapKeyOnlyKV.getFamilyByteBuffer(),
      offheapKeyOnlyKV.getFamilyPosition(), offheapKeyOnlyKV.getFamilyLength()));
  assertEquals(QUAL1, ByteBufferUtils.toStringBinary(offheapKeyOnlyKV.getQualifierByteBuffer(),
      offheapKeyOnlyKV.getQualifierPosition(), offheapKeyOnlyKV.getQualifierLength()));
  assertEquals(0L, offheapKeyOnlyKV.getTimestamp());
  assertEquals(Type.Put.getCode(), offheapKeyOnlyKV.getTypeByte());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies tag handling on an off-heap KeyValue: tags survive the buffer
 * round trip and are retrievable both as a list and by type via
 * CellUtil.getTag().
 */
@Test
public void testByteBufferBackedKeyValueWithTags() throws Exception {
  KeyValue kvCell = new KeyValue(row1, fam1, qual1, 0L, Type.Put, row1, tags);
  ByteBuffer buf = ByteBuffer.allocateDirect(kvCell.getBuffer().length);
  ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), 0, kvCell.getBuffer().length);
  ByteBufferedCell offheapKV = new OffheapKeyValue(buf, 0, buf.capacity(), true, 0L);
  assertEquals(ROW1, ByteBufferUtils.toStringBinary(offheapKV.getRowByteBuffer(),
      offheapKV.getRowPosition(), offheapKV.getRowLength()));
  assertEquals(FAM1, ByteBufferUtils.toStringBinary(offheapKV.getFamilyByteBuffer(),
      offheapKV.getFamilyPosition(), offheapKV.getFamilyLength()));
  assertEquals(QUAL1, ByteBufferUtils.toStringBinary(offheapKV.getQualifierByteBuffer(),
      offheapKV.getQualifierPosition(), offheapKV.getQualifierLength()));
  assertEquals(ROW1, ByteBufferUtils.toStringBinary(offheapKV.getValueByteBuffer(),
      offheapKV.getValuePosition(), offheapKV.getValueLength()));
  assertEquals(0L, offheapKV.getTimestamp());
  assertEquals(Type.Put.getCode(), offheapKV.getTypeByte());
  // Both tags must come back in order.
  List<Tag> resTags =
      TagUtil.asList(offheapKV.getTagsArray(), offheapKV.getTagsOffset(), offheapKV.getTagsLength());
  Tag tag1 = resTags.get(0);
  assertEquals(t1.getType(), tag1.getType());
  assertEquals(TagUtil.getValueAsString(t1), TagUtil.getValueAsString(tag1));
  Tag tag2 = resTags.get(1);
  // FIX: the original compared tag2.getType() against itself, which can never
  // fail; compare against the expected source tag t2 instead.
  assertEquals(t2.getType(), tag2.getType());
  assertEquals(TagUtil.getValueAsString(t2), TagUtil.getValueAsString(tag2));
  // Lookup by tag type.
  Tag res = CellUtil.getTag(offheapKV, (byte) 2);
  // FIX: the original re-asserted against tag2 here, never checking the tag
  // actually returned by getTag(); verify 'res' instead.
  assertEquals(TagUtil.getValueAsString(t2), TagUtil.getValueAsString(res));
  res = CellUtil.getTag(offheapKV, (byte) 3);
  assertNull(res);
}

Class: org.apache.hadoop.hbase.TestPerformanceEvaluation

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * With a zipfian value-size distribution the generated value lengths must be
 * neither constant nor degenerate: stddev and median of a sample differ from
 * 0/1/valueSize.
 */
@Test
public void testZipfian() throws NoSuchMethodException, SecurityException, InstantiationException,
    IllegalAccessException, IllegalArgumentException, InvocationTargetException {
  TestOptions opts = new PerformanceEvaluation.TestOptions();
  opts.setValueZipf(true);
  final int valueSize = 1024;
  opts.setValueSize(valueSize);
  RandomReadTest rrt = new RandomReadTest(null, opts, null);
  // FIX: restored the stripped generic parameter (raw Constructor), which also
  // makes the explicit Histogram cast on newInstance() unnecessary.
  Constructor<Histogram> ctor =
      Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class);
  ctor.setAccessible(true);
  Histogram histogram = ctor.newInstance(new UniformReservoir(1024 * 500));
  for (int i = 0; i < 100; i++) {
    histogram.update(rrt.getValueLength(null));
  }
  Snapshot snapshot = histogram.getSnapshot();
  double stddev = snapshot.getStdDev();
  // FIX: the original additionally asserted snapshot.getStdDev() != 0, which is
  // subsumed by this check and was dropped.
  assertTrue(stddev != 0 && stddev != 1.0);
  double median = snapshot.getMedian();
  assertTrue(median != 0 && median != 1 && median != valueSize);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier IgnoredMethod 
/**
 * Exercise the mr spec writing. Simple assertions to make sure it is basically working.
 * @throws IOException
 */
@Ignore
@Test
public void testWriteInputFile() throws IOException {
  TestOptions opts = new PerformanceEvaluation.TestOptions();
  final int clients = 10;
  opts.setNumClientThreads(clients);
  opts.setPerClientRunRows(10);
  Path dir =
      PerformanceEvaluation.writeInputFile(HTU.getConfiguration(), opts, HTU.getDataTestDir());
  FileSystem fs = FileSystem.get(HTU.getConfiguration());
  Path p = new Path(dir, PerformanceEvaluation.JOB_INPUT_FILENAME);
  long len = fs.getFileStatus(p).getLen();
  assertTrue(len > 0);
  byte[] content = new byte[(int) len];
  FSDataInputStream dis = fs.open(p);
  try {
    dis.readFully(content);
    // Every client contributes TASKS_PER_CLIENT lines to the job input file.
    BufferedReader br =
        new BufferedReader(new InputStreamReader(new ByteArrayInputStream(content)));
    int lineCount = 0;
    while (br.readLine() != null) {
      lineCount++;
    }
    assertEquals(clients * PerformanceEvaluation.TASKS_PER_CLIENT, lineCount);
  } finally {
    dis.close();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * calculateRowsAndSize(): the per-client row count scales with the requested
 * size, is divided among client threads, and changes again for random values.
 */
@Test
public void testSizeCalculation() {
  TestOptions opts = new PerformanceEvaluation.TestOptions();
  opts = PerformanceEvaluation.calculateRowsAndSize(opts);
  int rows = opts.getPerClientRunRows();
  final int defaultPerClientRunRows = 1024 * 1024;
  assertEquals(defaultPerClientRunRows, rows);
  // Doubling the requested size doubles the rows.
  opts.setSize(2.0f);
  opts = PerformanceEvaluation.calculateRowsAndSize(opts);
  assertEquals(defaultPerClientRunRows * 2, opts.getPerClientRunRows());
  // Two client threads split the doubled row count back to the default.
  opts.setNumClientThreads(2);
  opts = PerformanceEvaluation.calculateRowsAndSize(opts);
  assertEquals(defaultPerClientRunRows, opts.getPerClientRunRows());
  // With random values the per-client row count doubles again (presumably
  // because random values average smaller -- TODO confirm in calculateRowsAndSize).
  opts.valueRandom = true;
  opts = PerformanceEvaluation.calculateRowsAndSize(opts);
  assertEquals(defaultPerClientRunRows * 2, opts.getPerClientRunRows());
}

InternalCallVerifier BooleanVerifier 
/**
 * TestOptions must survive a JSON round trip (checked here via the autoFlush
 * flag, which is false by default).
 */
@Test
public void testSerialization() throws JsonGenerationException, JsonMappingException, IOException {
  PerformanceEvaluation.TestOptions options = new PerformanceEvaluation.TestOptions();
  // FIX: assertFalse(x) instead of assertTrue(!x) for readability.
  assertFalse(options.isAutoFlush());
  options.setAutoFlush(true);
  ObjectMapper mapper = new ObjectMapper();
  String optionsString = mapper.writeValueAsString(options);
  PerformanceEvaluation.TestOptions optionsDeserialized =
      mapper.readValue(optionsString, PerformanceEvaluation.TestOptions.class);
  assertTrue(optionsDeserialized.isAutoFlush());
}

Class: org.apache.hadoop.hbase.TestRegionLocations

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * RegionLocations built with null slots: size() counts every slot while
 * numNonNullElements() counts only real locations.
 */
@Test
public void testConstructWithNullElements() {
  RegionLocations list = new RegionLocations((HRegionLocation) null);
  assertTrue(list.isEmpty());
  assertEquals(1, list.size());
  assertEquals(0, list.numNonNullElements());

  list = new RegionLocations(null, hrl(info1, sn0));
  assertFalse(list.isEmpty());
  assertEquals(2, list.size());
  assertEquals(1, list.numNonNullElements());

  list = new RegionLocations(hrl(info0, sn0), null);
  assertEquals(2, list.size());
  assertEquals(1, list.numNonNullElements());

  // An entry for info9 pushes the slot count up to 10 despite only 4 arguments.
  list = new RegionLocations(null, hrl(info2, sn0), null, hrl(info9, sn0));
  assertEquals(10, list.size());
  assertEquals(2, list.numNonNullElements());

  // Trailing nulls after the info9 entry are kept as extra slots.
  list = new RegionLocations(null, hrl(info2, sn0), null, hrl(info9, sn0), null);
  assertEquals(11, list.size());
  assertEquals(2, list.numNonNullElements());

  list = new RegionLocations(null, hrl(info2, sn0), null, hrl(info9, sn0), null, null);
  assertEquals(12, list.size());
  assertEquals(2, list.numNonNullElements());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * updateLocation() with an entry whose regionId differs from the existing
 * ones: the old-regionId entries are dropped and only the update remains.
 */
@Test
public void testUpdateLocationWithDifferentRegionId() {
  RegionLocations list;
  HRegionInfo info0 = hri(regionId1, 0);
  HRegionInfo info1 = hri(regionId2, 1);
  HRegionInfo info2 = hri(regionId1, 2);
  list = new RegionLocations(hrl(info0, sn1), hrl(info2, sn1));
  // Push an update carrying a different region id.
  list = list.updateLocation(hrl(info1, sn2), false, true);
  // The two regionId1 entries are gone; only the new replica-1 entry remains.
  assertNull(list.getRegionLocation(0));
  assertNotNull(list.getRegionLocation(1));
  assertNull(list.getRegionLocation(2));
  assertEquals(sn2, list.getRegionLocation(1).getServerName());
  assertEquals(3, list.size());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * RegionLocations.remove(): an absent or server-mismatched location is a no-op
 * returning the identical instance; a matching one clears only its replica
 * slot.
 */
@Test
public void testRemove() {
  RegionLocations list;

  // Removing from an empty list returns the identical instance.
  list = new RegionLocations();
  assertTrue(list == list.remove(hrl(info0, sn0)));

  // Right region but different server: no match, same instance back.
  list = hrll(hrl(info0, sn0));
  assertTrue(list == list.remove(hrl(info0, sn1)));
  list = list.remove(hrl(info0, sn0));
  assertTrue(list.isEmpty());

  // Removing one entry leaves the remaining replica slots untouched.
  list = hrll(hrl(info0, sn0), hrl(info1, sn1), hrl(info2, sn2), hrl(info9, sn2));
  assertTrue(list == list.remove(hrl(info1, sn3)));
  list = list.remove(hrl(info0, sn0));
  assertNull(list.getRegionLocation(0));
  assertEquals(sn1, list.getRegionLocation(1).getServerName());
  assertEquals(sn2, list.getRegionLocation(2).getServerName());
  assertNull(list.getRegionLocation(5));
  assertEquals(sn2, list.getRegionLocation(9).getServerName());

  list = list.remove(hrl(info9, sn2));
  assertNull(list.getRegionLocation(0));
  assertEquals(sn1, list.getRegionLocation(1).getServerName());
  assertEquals(sn2, list.getRegionLocation(2).getServerName());
  assertNull(list.getRegionLocation(5));
  assertNull(list.getRegionLocation(9));

  // Removing the highest replica keeps the lower slots intact.
  list = hrll(hrl(info0, sn1), hrl(info1, sn1), hrl(info2, sn0), hrl(info9, sn0));
  list = list.remove(hrl(info9, sn0));
  assertEquals(sn1, list.getRegionLocation(0).getServerName());
  assertEquals(sn1, list.getRegionLocation(1).getServerName());
  assertEquals(sn0, list.getRegionLocation(2).getServerName());
  assertNull(list.getRegionLocation(5));
  assertNull(list.getRegionLocation(9));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * RegionLocations.updateLocation(): higher seqNums replace existing entries;
 * the two boolean flags relax the replacement rules (see assertions --
 * presumably checkForEquals and force; TODO confirm the parameter names).
 */
@Test
public void testUpdateLocation() {
  RegionLocations list = new RegionLocations();
  list = list.updateLocation(hrl(info0, sn1), false, false);
  assertEquals(sn1, list.getRegionLocation(0).getServerName());
  // Updating replica 9 grows the list to ten slots.
  list = list.updateLocation(hrl(info9, sn3, 10), false, false);
  assertEquals(sn3, list.getRegionLocation(9).getServerName());
  assertEquals(10, list.size());
  list = list.updateLocation(hrl(info2, sn2, 10), false, false);
  assertEquals(sn2, list.getRegionLocation(2).getServerName());
  assertEquals(10, list.size());
  // A higher seqNum (11 > 10) replaces the existing entry.
  list = list.updateLocation(hrl(info2, sn3, 11), false, false);
  assertEquals(sn3, list.getRegionLocation(2).getServerName());
  assertEquals(sn3, list.getRegionLocation(9).getServerName());
  // An equal seqNum with both flags false does not replace.
  list = list.updateLocation(hrl(info2, sn1, 11), false, false);
  assertEquals(sn3, list.getRegionLocation(2).getServerName());
  assertEquals(sn3, list.getRegionLocation(9).getServerName());
  // An equal seqNum with the second flag set does replace.
  list = list.updateLocation(hrl(info2, sn1, 11), true, false);
  assertEquals(sn1, list.getRegionLocation(2).getServerName());
  assertEquals(sn3, list.getRegionLocation(9).getServerName());
  // A lower seqNum with the third flag set replaces anyway.
  list = list.updateLocation(hrl(info2, sn2, 9), false, true);
  assertEquals(sn2, list.getRegionLocation(2).getServerName());
  assertEquals(sn3, list.getRegionLocation(9).getServerName());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * RegionLocations.removeByServer(): clears every replica hosted by the given
 * server; a server with no entries yields the identical instance.
 */
@Test
public void testRemoveByServer() {
  RegionLocations list;

  // Empty list: no-op returning the identical instance.
  list = new RegionLocations();
  assertTrue(list == list.removeByServer(sn0));

  list = hrll(hrl(info0, sn0));
  assertTrue(list == list.removeByServer(sn1));
  list = list.removeByServer(sn0);
  assertEquals(0, list.numNonNullElements());

  // Only the slot hosted by sn0 is nulled; other servers keep their slots.
  list = hrll(hrl(info0, sn0), hrl(info1, sn1), hrl(info2, sn2), hrl(info9, sn2));
  assertTrue(list == list.removeByServer(sn3));
  list = list.removeByServer(sn0);
  assertNull(list.getRegionLocation(0));
  assertEquals(sn1, list.getRegionLocation(1).getServerName());
  assertEquals(sn2, list.getRegionLocation(2).getServerName());
  assertNull(list.getRegionLocation(5));
  assertEquals(sn2, list.getRegionLocation(9).getServerName());

  // A server hosting several replicas loses all of them at once.
  list = hrll(hrl(info0, sn1), hrl(info1, sn1), hrl(info2, sn0), hrl(info9, sn0));
  list = list.removeByServer(sn0);
  assertEquals(sn1, list.getRegionLocation(0).getServerName());
  assertEquals(sn1, list.getRegionLocation(1).getServerName());
  assertNull(list.getRegionLocation(2));
  assertNull(list.getRegionLocation(5));
  assertNull(list.getRegionLocation(9));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * size() counts slots (including nulls and null-server entries);
 * numNonNullElements() counts only real locations; isEmpty() tracks the
 * latter.
 */
@Test
public void testSizeMethods() {
  RegionLocations list = new RegionLocations();
  assertTrue(list.isEmpty());
  assertEquals(0, list.size());
  assertEquals(0, list.numNonNullElements());

  list = hrll((HRegionLocation) null);
  assertTrue(list.isEmpty());
  assertEquals(1, list.size());
  assertEquals(0, list.numNonNullElements());

  // A location with a null server counts as a slot but not as an element.
  HRegionInfo info0 = hri(0);
  list = hrll(hrl(info0, null));
  assertTrue(list.isEmpty());
  assertEquals(1, list.size());
  assertEquals(0, list.numNonNullElements());

  // A replica id of 9 forces the backing array out to ten slots.
  HRegionInfo info9 = hri(9);
  list = hrll(hrl(info9, null));
  assertTrue(list.isEmpty());
  assertEquals(10, list.size());
  assertEquals(0, list.numNonNullElements());

  list = hrll(hrl(info0, null), hrl(info9, null));
  assertTrue(list.isEmpty());
  assertEquals(10, list.size());
  assertEquals(0, list.numNonNullElements());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * mergeLocations() with mismatched region ids: the entries of the list passed
 * as the argument win, and the receiver's entries for the other region id are
 * dropped (see assertions).
 */
@Test
public void testMergeLocationsWithDifferentRegionId() {
  RegionLocations list1, list2;
  HRegionInfo info0 = hri(regionId1, 0);
  HRegionInfo info1 = hri(regionId1, 1);
  HRegionInfo info2 = hri(regionId2, 2);

  // Merge regionId2 entries (argument) into regionId1 entries (receiver):
  // only the regionId2 entry at replica 2 remains.
  list1 = hrll(hrl(info2, sn1));
  list2 = hrll(hrl(info0, sn2), hrl(info1, sn2));
  list1 = list2.mergeLocations(list1);
  assertNull(list1.getRegionLocation(0));
  assertNull(list1.getRegionLocation(1));
  assertNotNull(list1.getRegionLocation(2));
  assertEquals(sn1, list1.getRegionLocation(2).getServerName());
  assertEquals(3, list1.size());

  // Merged the other way round, the regionId1 entries remain instead.
  list1 = hrll(hrl(info2, sn1));
  list2 = hrll(hrl(info0, sn2), hrl(info1, sn2));
  list2 = list1.mergeLocations(list2);
  assertNotNull(list2.getRegionLocation(0));
  assertNotNull(list2.getRegionLocation(1));
  assertNull(list2.getRegionLocation(2));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * mergeLocations(): merging two empties is a no-op; otherwise the entries of
 * the list passed as the argument take precedence and gaps are filled (see
 * the assertions for the exact per-stanza expectations).
 */
@Test
public void testMergeLocations() {
  RegionLocations list1, list2;

  // Merging two empty lists returns the identical instance.
  list1 = new RegionLocations();
  list2 = new RegionLocations();
  assertTrue(list1 == list1.mergeLocations(list2));

  list2 = hrll(hrl(info0, sn0));
  list1 = list1.mergeLocations(list2);
  assertEquals(sn0, list1.getRegionLocation(0).getServerName());

  list1 = hrll();
  list1 = list2.mergeLocations(list1);
  assertEquals(sn0, list1.getRegionLocation(0).getServerName());

  // Receiver has replica 2 only; argument's replicas 0 and 1 survive.
  list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
  list2 = hrll(hrl(info2, sn2));
  list1 = list2.mergeLocations(list1);
  assertEquals(sn0, list1.getRegionLocation(0).getServerName());
  assertEquals(sn1, list1.getRegionLocation(1).getServerName());
  assertEquals(2, list1.size());

  // Merged the other way, all three replicas are present.
  list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
  list2 = hrll(hrl(info2, sn2));
  list1 = list1.mergeLocations(list2);
  assertEquals(sn0, list1.getRegionLocation(0).getServerName());
  assertEquals(sn1, list1.getRegionLocation(1).getServerName());
  assertEquals(sn2, list1.getRegionLocation(2).getServerName());

  // Overlapping replicas without seqNums: the argument's entries win.
  list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
  list2 = hrll(hrl(info0, sn2), hrl(info1, sn2), hrl(info9, sn3));
  list1 = list2.mergeLocations(list1);
  assertEquals(2, list1.size());
  assertEquals(sn0, list1.getRegionLocation(0).getServerName());
  assertEquals(sn1, list1.getRegionLocation(1).getServerName());

  list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
  list2 = hrll(hrl(info0, sn2), hrl(info1, sn2), hrl(info9, sn3));
  list1 = list1.mergeLocations(list2);
  assertEquals(10, list1.size());
  assertEquals(sn2, list1.getRegionLocation(0).getServerName());
  assertEquals(sn2, list1.getRegionLocation(1).getServerName());
  assertEquals(sn3, list1.getRegionLocation(9).getServerName());

  // Entries carrying seqNums: the higher seqNum (11) replaces the lower (10).
  // FIX: this stanza appeared twice verbatim in the original (identical inputs
  // and assertions); the exact duplicate was removed.
  list1 = hrll(hrl(info0, sn0, 10), hrl(info1, sn1, 10));
  list2 = hrll(hrl(info0, sn2, 11), hrl(info1, sn2, 11), hrl(info9, sn3, 11));
  list1 = list1.mergeLocations(list2);
  assertEquals(10, list1.size());
  assertEquals(sn2, list1.getRegionLocation(0).getServerName());
  assertEquals(sn2, list1.getRegionLocation(1).getServerName());
  assertEquals(sn3, list1.getRegionLocation(9).getServerName());
}

Class: org.apache.hadoop.hbase.TestRegionRebalancing

InternalCallVerifier EqualityVerifier 
/**
 * For HBASE-71. Try a few different configurations of starting and stopping
 * region servers to see if the assignment or regions is pretty balanced.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testRebalanceOnRegionServerNumberChange() throws IOException, InterruptedException {
  try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
      Admin admin = connection.getAdmin()) {
    admin.createTable(this.desc,
        Arrays.copyOfRange(HBaseTestingUtility.KEYS, 1, HBaseTestingUtility.KEYS.length));
    this.regionLocator = connection.getRegionLocator(this.desc.getTableName());
    MetaTableAccessor.fullScanMetaAndPrint(admin.getConnection());
    assertEquals("Test table should have right number of regions",
        HBaseTestingUtility.KEYS.length, this.regionLocator.getStartKeys().length);
    assertRegionsAreBalanced();

    // Add a second server, rebalance and re-check.
    LOG.info("Started second server="
        + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
    UTIL.getHBaseCluster().getMaster().balance();
    assertRegionsAreBalanced();
    // NOTE(review): plain 'assert' only fires when the JVM runs with -ea;
    // kept as in the original ('== true' dropped, which is equivalent).
    assert (UTIL.getHBaseCluster().getMaster().balance());

    // Add a third server.
    LOG.info("Started third server="
        + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
    assert (UTIL.getHBaseCluster().getMaster().balance());
    assertRegionsAreBalanced();

    // Kill the third server, wait for its shutdown to be processed, rebalance.
    LOG.info("Stopped third server=" + UTIL.getHBaseCluster().stopRegionServer(2, false));
    UTIL.getHBaseCluster().waitOnRegionServer(2);
    waitOnCrashProcessing();
    UTIL.getHBaseCluster().getMaster().balance();
    assertRegionsAreBalanced();

    // Bring the third back and add a fourth.
    LOG.info("Readding third server="
        + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
    LOG.info("Added fourth server="
        + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
    waitOnCrashProcessing();
    assert (UTIL.getHBaseCluster().getMaster().balance());
    assertRegionsAreBalanced();

    // Grow the cluster by six more servers and check balance one last time.
    for (int i = 0; i < 6; i++) {
      LOG.info("Adding " + (i + 5) + "th region server");
      UTIL.getHBaseCluster().startRegionServer();
    }
    assert (UTIL.getHBaseCluster().getMaster().balance());
    assertRegionsAreBalanced();
    regionLocator.close();
  }
}

Class: org.apache.hadoop.hbase.TestSerialization

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testGet() throws Exception { byte[] row="row".getBytes(); byte[] fam="fam".getBytes(); byte[] qf1="qf1".getBytes(); long ts=System.currentTimeMillis(); int maxVersions=2; Get get=new Get(row); get.addColumn(fam,qf1); get.setTimeRange(ts,ts + 1); get.setMaxVersions(maxVersions); ClientProtos.Get getProto=ProtobufUtil.toGet(get); Get desGet=ProtobufUtil.toGet(getProto); assertTrue(Bytes.equals(get.getRow(),desGet.getRow())); Set set=null; Set desSet=null; for ( Map.Entry> entry : get.getFamilyMap().entrySet()) { assertTrue(desGet.getFamilyMap().containsKey(entry.getKey())); set=entry.getValue(); desSet=desGet.getFamilyMap().get(entry.getKey()); for ( byte[] qualifier : set) { assertTrue(desSet.contains(qualifier)); } } assertEquals(get.getMaxVersions(),desGet.getMaxVersions()); TimeRange tr=get.getTimeRange(); TimeRange desTr=desGet.getTimeRange(); assertEquals(tr.getMax(),desTr.getMax()); assertEquals(tr.getMin(),desTr.getMin()); }

APIUtilityVerifier InternalCallVerifier NullVerifier 
@Test public void testCompareFilter() throws Exception { Filter f=new RowFilter(CompareOp.EQUAL,new BinaryComparator(Bytes.toBytes("testRowOne-2"))); byte[] bytes=f.toByteArray(); Filter ff=RowFilter.parseFrom(bytes); assertNotNull(ff); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test public void testSplitLogTask() throws DeserializationException { SplitLogTask slt=new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"),RecoveryMode.LOG_REPLAY); byte[] bytes=slt.toByteArray(); SplitLogTask sltDeserialized=SplitLogTask.parseFrom(bytes); assertTrue(slt.equals(sltDeserialized)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testTableDescriptor() throws Exception { final String name="testTableDescriptor"; HTableDescriptor htd=createTableDescriptor(name); byte[] mb=htd.toByteArray(); HTableDescriptor deserializedHtd=HTableDescriptor.parseFrom(mb); assertEquals(htd.getTableName(),deserializedHtd.getTableName()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Test RegionInfo serialization * @throws Exception */ @Test public void testRegionInfo() throws Exception { HRegionInfo hri=createRandomRegion("testRegionInfo"); byte[] hrib=hri.toByteArray(); HRegionInfo deserializedHri=HRegionInfo.parseFrom(hrib); assertEquals(hri.getEncodedName(),deserializedHri.getEncodedName()); assertEquals(hri,deserializedHri); hrib=hri.toDelimitedByteArray(); DataInputBuffer buf=new DataInputBuffer(); try { buf.reset(hrib,hrib.length); deserializedHri=HRegionInfo.parseFrom(buf); assertEquals(hri.getEncodedName(),deserializedHri.getEncodedName()); assertEquals(hri,deserializedHri); } finally { buf.close(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testKeyValue() throws Exception { final String name="testKeyValue2"; byte[] row=name.getBytes(); byte[] fam="fam".getBytes(); byte[] qf="qf".getBytes(); long ts=System.currentTimeMillis(); byte[] val="val".getBytes(); KeyValue kv=new KeyValue(row,fam,qf,ts,val); ByteArrayOutputStream baos=new ByteArrayOutputStream(); DataOutputStream dos=new DataOutputStream(baos); long l=KeyValueUtil.write(kv,dos); dos.close(); byte[] mb=baos.toByteArray(); ByteArrayInputStream bais=new ByteArrayInputStream(mb); DataInputStream dis=new DataInputStream(bais); KeyValue deserializedKv=KeyValueUtil.create(dis); assertTrue(Bytes.equals(kv.getBuffer(),deserializedKv.getBuffer())); assertEquals(kv.getOffset(),deserializedKv.getOffset()); assertEquals(kv.getLength(),deserializedKv.getLength()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testScan() throws Exception { byte[] startRow="startRow".getBytes(); byte[] stopRow="stopRow".getBytes(); byte[] fam="fam".getBytes(); byte[] qf1="qf1".getBytes(); long ts=System.currentTimeMillis(); int maxVersions=2; Scan scan=new Scan(startRow,stopRow); scan.addColumn(fam,qf1); scan.setTimeRange(ts,ts + 1); scan.setMaxVersions(maxVersions); ClientProtos.Scan scanProto=ProtobufUtil.toScan(scan); Scan desScan=ProtobufUtil.toScan(scanProto); assertTrue(Bytes.equals(scan.getStartRow(),desScan.getStartRow())); assertTrue(Bytes.equals(scan.getStopRow(),desScan.getStopRow())); assertEquals(scan.getCacheBlocks(),desScan.getCacheBlocks()); Set set=null; Set desSet=null; for ( Map.Entry> entry : scan.getFamilyMap().entrySet()) { assertTrue(desScan.getFamilyMap().containsKey(entry.getKey())); set=entry.getValue(); desSet=desScan.getFamilyMap().get(entry.getKey()); for ( byte[] column : set) { assertTrue(desSet.contains(column)); } scan=new Scan(startRow); final String name="testScan"; byte[] prefix=Bytes.toBytes(name); scan.setFilter(new PrefixFilter(prefix)); scanProto=ProtobufUtil.toScan(scan); desScan=ProtobufUtil.toScan(scanProto); Filter f=desScan.getFilter(); assertTrue(f instanceof PrefixFilter); } assertEquals(scan.getMaxVersions(),desScan.getMaxVersions()); TimeRange tr=scan.getTimeRange(); TimeRange desTr=desScan.getTimeRange(); assertEquals(tr.getMax(),desTr.getMax()); assertEquals(tr.getMin(),desTr.getMin()); }

Class: org.apache.hadoop.hbase.TestServerLoad

InternalCallVerifier EqualityVerifier 
@Test public void testRegionLoadAggregation(){ ServerLoad sl=new ServerLoad(createServerLoadProto()); assertEquals(13,sl.getStores()); assertEquals(114,sl.getStorefiles()); assertEquals(129,sl.getStoreUncompressedSizeMB()); assertEquals(504,sl.getRootIndexSizeKB()); assertEquals(820,sl.getStorefileSizeInMB()); assertEquals(82,sl.getStorefileIndexSizeInMB()); assertEquals(((long)Integer.MAX_VALUE) * 2,sl.getReadRequestsCount()); }

InternalCallVerifier BooleanVerifier 
@Test public void testToString(){ ServerLoad sl=new ServerLoad(createServerLoadProto()); String slToString=sl.toString(); assertTrue(slToString.contains("numberOfStores=13")); assertTrue(slToString.contains("numberOfStorefiles=114")); assertTrue(slToString.contains("storefileUncompressedSizeMB=129")); assertTrue(slToString.contains("storefileSizeMB=820")); assertTrue(slToString.contains("rootIndexSizeKB=504")); assertTrue(slToString.contains("coprocessors=[]")); }

InternalCallVerifier EqualityVerifier 
@Test public void testRegionLoadWrapAroundAggregation(){ ServerLoad sl=new ServerLoad(createServerLoadProto()); long totalCount=((long)Integer.MAX_VALUE) * 2; assertEquals(totalCount,sl.getReadRequestsCount()); assertEquals(totalCount,sl.getWriteRequestsCount()); }

Class: org.apache.hadoop.hbase.TestServerName

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testParseOfBytes(){ final String snStr="www.example.org,1234,5678"; ServerName sn=ServerName.valueOf(snStr); byte[] versionedBytes=sn.getVersionedBytes(); assertEquals(sn.toString(),ServerName.parseVersionedServerName(versionedBytes).toString()); final String hostnamePortStr=sn.getHostAndPort(); byte[] bytes=Bytes.toBytes(hostnamePortStr); String expecting=hostnamePortStr.replace(":",ServerName.SERVERNAME_SEPARATOR) + ServerName.SERVERNAME_SEPARATOR + ServerName.NON_STARTCODE; assertEquals(expecting,ServerName.parseVersionedServerName(bytes).toString()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testHostNameCaseSensitivity(){ ServerName lower=ServerName.valueOf("www.example.org",1234,5678); ServerName upper=ServerName.valueOf("www.EXAMPLE.org",1234,5678); assertEquals(0,lower.compareTo(upper)); assertEquals(0,upper.compareTo(lower)); assertEquals(lower.hashCode(),upper.hashCode()); assertTrue(lower.equals(upper)); assertTrue(upper.equals(lower)); assertTrue(ServerName.isSameHostnameAndPort(lower,upper)); }

InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
@Test public void testServerName(){ ServerName sn=ServerName.valueOf("www.example.org",1234,5678); ServerName sn2=ServerName.valueOf("www.example.org",1234,5678); ServerName sn3=ServerName.valueOf("www.example.org",1234,56789); assertTrue(sn.equals(sn2)); assertFalse(sn.equals(sn3)); assertEquals(sn.hashCode(),sn2.hashCode()); assertNotSame(sn.hashCode(),sn3.hashCode()); assertEquals(sn.toString(),ServerName.getServerName("www.example.org",1234,5678)); assertEquals(sn.toString(),ServerName.getServerName("www.example.org:1234",5678)); assertEquals(sn.toString(),"www.example.org" + ServerName.SERVERNAME_SEPARATOR + "1234"+ ServerName.SERVERNAME_SEPARATOR+ "5678"); }

InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
@Test public void getServerStartcodeFromServerName(){ ServerName sn=ServerName.valueOf("www.example.org",1234,5678); assertEquals(5678,ServerName.getServerStartcodeFromServerName(sn.toString())); assertNotSame(5677,ServerName.getServerStartcodeFromServerName(sn.toString())); }

Class: org.apache.hadoop.hbase.TestTableDescriptor

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testPb() throws DeserializationException, IOException { HTableDescriptor htd=new HTableDescriptor(TableName.META_TABLE_NAME); final int v=123; htd.setMaxFileSize(v); htd.setDurability(Durability.ASYNC_WAL); htd.setReadOnly(true); htd.setRegionReplication(2); TableDescriptor td=new TableDescriptor(htd); byte[] bytes=td.toByteArray(); TableDescriptor deserializedTd=TableDescriptor.parseFrom(bytes); assertEquals(td,deserializedTd); assertEquals(td.getHTableDescriptor(),deserializedTd.getHTableDescriptor()); }

Class: org.apache.hadoop.hbase.TestTagRewriteCell

InternalCallVerifier BooleanVerifier 
@Test public void testHeapSize(){ Cell originalCell=CellUtil.createCell(Bytes.toBytes("row"),Bytes.toBytes("value")); final int fakeTagArrayLength=10; TagRewriteCell trCell=new TagRewriteCell(originalCell,new byte[fakeTagArrayLength]); long trCellHeapSize=trCell.heapSize(); TagRewriteCell trCell2=new TagRewriteCell(trCell,new byte[fakeTagArrayLength]); assertTrue("TagRewriteCell containing a TagRewriteCell's heapsize should be larger than a " + "single TagRewriteCell's heapsize",trCellHeapSize < trCell2.heapSize()); assertTrue("TagRewriteCell should have had nulled out tags array",trCell.heapSize() < trCellHeapSize); }

Class: org.apache.hadoop.hbase.TestZooKeeper

UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier 
/** * Create a bunch of znodes in a hierarchy, try deleting one that has childs (it will fail), then * delete it recursively, then delete the last znode * @throws Exception */ @Test public void testZNodeDeletes() throws Exception { ZooKeeperWatcher zkw=new ZooKeeperWatcher(new Configuration(TEST_UTIL.getConfiguration()),TestZooKeeper.class.getName(),null); ZKUtil.createWithParents(zkw,"/l1/l2/l3/l4"); try { ZKUtil.deleteNode(zkw,"/l1/l2"); fail("We should not be able to delete if znode has childs"); } catch ( KeeperException ex) { assertNotNull(ZKUtil.getDataNoWatch(zkw,"/l1/l2/l3/l4",null)); } ZKUtil.deleteNodeRecursively(zkw,"/l1/l2"); assertNull(ZKUtil.getDataNoWatch(zkw,"/l1/l2/l3/l4",null)); ZKUtil.deleteNodeRecursively(zkw,"/l1/l2"); ZKUtil.deleteNode(zkw,"/l1"); assertNull(ZKUtil.getDataNoWatch(zkw,"/l1/l2",null)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** * Create a znode with data * @throws Exception */ @Test public void testCreateWithParents() throws Exception { ZooKeeperWatcher zkw=new ZooKeeperWatcher(new Configuration(TEST_UTIL.getConfiguration()),TestZooKeeper.class.getName(),null); byte[] expectedData=new byte[]{1,2,3}; ZKUtil.createWithParents(zkw,"/l1/l2/l3/l4/testCreateWithParents",expectedData); byte[] data=ZKUtil.getData(zkw,"/l1/l2/l3/l4/testCreateWithParents"); assertTrue(Bytes.equals(expectedData,data)); ZKUtil.deleteNodeRecursively(zkw,"/l1"); ZKUtil.createWithParents(zkw,"/testCreateWithParents",expectedData); data=ZKUtil.getData(zkw,"/testCreateWithParents"); assertTrue(Bytes.equals(expectedData,data)); ZKUtil.deleteNodeRecursively(zkw,"/testCreateWithParents"); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Tests that the master does not call retainAssignment after recovery from expired zookeeper * session. Without the HBASE-6046 fix master always tries to assign all the user regions by * calling retainAssignment. */ @Test(timeout=300000) public void testRegionAssignmentAfterMasterRecoveryDueToZKExpiry() throws Exception { MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); cluster.startRegionServer(); cluster.waitForActiveAndReadyMaster(10000); HMaster m=cluster.getMaster(); final ZooKeeperWatcher zkw=m.getZooKeeper(); try (Admin admin=TEST_UTIL.getHBaseAdmin()){ byte[][] SPLIT_KEYS=new byte[][]{Bytes.toBytes("a"),Bytes.toBytes("b"),Bytes.toBytes("c"),Bytes.toBytes("d"),Bytes.toBytes("e"),Bytes.toBytes("f"),Bytes.toBytes("g"),Bytes.toBytes("h"),Bytes.toBytes("i"),Bytes.toBytes("j")}; String tableName="testRegionAssignmentAfterMasterRecoveryDueToZKExpiry"; HTableDescriptor htd=new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(htd,SPLIT_KEYS); TEST_UTIL.waitUntilNoRegionsInTransition(60000); m.getZooKeeper().close(); MockLoadBalancer.retainAssignCalled=false; final int expectedNumOfListeners=countPermanentListeners(zkw); m.abort("Test recovery from zk session expired",new KeeperException.SessionExpiredException()); assertTrue(m.isStopped()); assertFalse("Retain assignment should not be called",MockLoadBalancer.retainAssignCalled); cluster.waitForActiveAndReadyMaster(120000); final HMaster newMaster=cluster.getMasterThread().getMaster(); assertEquals(expectedNumOfListeners,countPermanentListeners(newMaster.getZooKeeper())); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test public void testMultipleZK() throws IOException, NoSuchMethodException, InvocationTargetException, IllegalAccessException { Table localMeta=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); Configuration otherConf=new Configuration(TEST_UTIL.getConfiguration()); otherConf.set(HConstants.ZOOKEEPER_QUORUM,"127.0.0.1"); Connection connection=ConnectionFactory.createConnection(otherConf); Table ipMeta=connection.getTable(TableName.META_TABLE_NAME); final byte[] row=new byte[]{'r'}; localMeta.exists(new Get(row)); ipMeta.exists(new Get(row)); ZooKeeperWatcher z1=getZooKeeperWatcher(ConnectionFactory.createConnection(localMeta.getConfiguration())); ZooKeeperWatcher z2=getZooKeeperWatcher(ConnectionFactory.createConnection(otherConf)); assertFalse(z1 == z2); assertFalse(z1.getQuorum().equals(z2.getQuorum())); localMeta.close(); ipMeta.close(); connection.close(); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Tests whether the logs are split when master recovers from a expired zookeeper session and an * RS goes down. */ @Test(timeout=300000) public void testLogSplittingAfterMasterRecoveryDueToZKExpiry() throws Exception { MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); cluster.startRegionServer(); HMaster m=cluster.getMaster(); Admin admin=TEST_UTIL.getHBaseAdmin(); Table table=null; try { byte[][] SPLIT_KEYS=new byte[][]{Bytes.toBytes("1"),Bytes.toBytes("2"),Bytes.toBytes("3"),Bytes.toBytes("4"),Bytes.toBytes("5")}; String tableName="testLogSplittingAfterMasterRecoveryDueToZKExpiry"; HTableDescriptor htd=new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor hcd=new HColumnDescriptor("col"); htd.addFamily(hcd); admin.createTable(htd,SPLIT_KEYS); TEST_UTIL.waitUntilNoRegionsInTransition(60000); table=TEST_UTIL.getConnection().getTable(htd.getTableName()); Put p; int numberOfPuts; for (numberOfPuts=0; numberOfPuts < 6; numberOfPuts++) { p=new Put(Bytes.toBytes(numberOfPuts)); p.addColumn(Bytes.toBytes("col"),Bytes.toBytes("ql"),Bytes.toBytes("value" + numberOfPuts)); table.put(p); } m.getZooKeeper().close(); m.abort("Test recovery from zk session expired",new KeeperException.SessionExpiredException()); assertTrue(m.isStopped()); cluster.getRegionServer(0).abort("Aborting"); Scan scan=new Scan(); int numberOfRows=0; ResultScanner scanner=table.getScanner(scan); Result[] result=scanner.next(1); while (result != null && result.length > 0) { numberOfRows++; result=scanner.next(1); } assertEquals("Number of rows should be equal to number of puts.",numberOfPuts,numberOfRows); } finally { if (table != null) table.close(); admin.close(); } }

InternalCallVerifier BooleanVerifier 
/** * Master recovery when the znode already exists. Internally, this * test differs from {@link #testMasterSessionExpired} because here * the master znode will exist in ZK. */ @Test(timeout=300000) public void testMasterZKSessionRecoveryFailure() throws Exception { LOG.info("Starting testMasterZKSessionRecoveryFailure"); MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); HMaster m=cluster.getMaster(); m.abort("Test recovery from zk session expired",new KeeperException.SessionExpiredException()); assertTrue(m.isStopped()); testSanity("testMasterZKSessionRecoveryFailure"); }

Class: org.apache.hadoop.hbase.backup.TestHFileArchiving

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testArchiveOnTableDelete() throws Exception { TableName TABLE_NAME=TableName.valueOf("testArchiveOnTableDelete"); UTIL.createTable(TABLE_NAME,TEST_FAM); List servingRegions=UTIL.getHBaseCluster().getRegions(TABLE_NAME); assertEquals(1,servingRegions.size()); Region region=servingRegions.get(0); HRegionServer hrs=UTIL.getRSForFirstRegionInTable(TABLE_NAME); FileSystem fs=hrs.getFileSystem(); LOG.debug("-------Loading table"); UTIL.loadRegion(region,TEST_FAM); List regions=hrs.getOnlineRegions(TABLE_NAME); assertEquals("More that 1 region for test table.",1,regions.size()); region=regions.get(0); region.waitForFlushesAndCompactions(); UTIL.getHBaseAdmin().disableTable(TABLE_NAME); LOG.debug("Disabled table"); clearArchiveDirectory(); byte[][] columns=region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]); List storeFiles=region.getStoreFileList(columns); UTIL.deleteTable(TABLE_NAME); LOG.debug("Deleted table"); assertArchiveFiles(fs,storeFiles,30000); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Test that the store files are archived when a column family is removed. * @throws Exception */ @Test public void testArchiveOnTableFamilyDelete() throws Exception { TableName TABLE_NAME=TableName.valueOf("testArchiveOnTableFamilyDelete"); UTIL.createTable(TABLE_NAME,new byte[][]{TEST_FAM,Bytes.toBytes("fam2")}); List servingRegions=UTIL.getHBaseCluster().getRegions(TABLE_NAME); assertEquals(1,servingRegions.size()); Region region=servingRegions.get(0); HRegionServer hrs=UTIL.getRSForFirstRegionInTable(TABLE_NAME); FileSystem fs=hrs.getFileSystem(); LOG.debug("-------Loading table"); UTIL.loadRegion(region,TEST_FAM); List regions=hrs.getOnlineRegions(TABLE_NAME); assertEquals("More that 1 region for test table.",1,regions.size()); region=regions.get(0); region.waitForFlushesAndCompactions(); UTIL.getHBaseAdmin().disableTable(TABLE_NAME); LOG.debug("Disabled table"); clearArchiveDirectory(); byte[][] columns=region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]); List storeFiles=region.getStoreFileList(columns); UTIL.getHBaseAdmin().deleteColumnFamily(TABLE_NAME,TEST_FAM); assertArchiveFiles(fs,storeFiles,30000); UTIL.deleteTable(TABLE_NAME); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testRemovesRegionDirOnArchive() throws Exception { TableName TABLE_NAME=TableName.valueOf("testRemovesRegionDirOnArchive"); UTIL.createTable(TABLE_NAME,TEST_FAM); final Admin admin=UTIL.getHBaseAdmin(); List servingRegions=UTIL.getHBaseCluster().getRegions(TABLE_NAME); assertEquals(1,servingRegions.size()); HRegion region=servingRegions.get(0); UTIL.loadRegion(region,TEST_FAM); admin.disableTable(TABLE_NAME); FileSystem fs=UTIL.getTestFileSystem(); Path rootDir=region.getRegionFileSystem().getTableDir().getParent(); Path regionDir=HRegion.getRegionDir(rootDir,region.getRegionInfo()); HFileArchiver.archiveRegion(UTIL.getConfiguration(),fs,region.getRegionInfo()); Path archiveDir=HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(),region); assertTrue(fs.exists(archiveDir)); FileStatus[] stores=fs.listStatus(archiveDir,new PathFilter(){ @Override public boolean accept( Path p){ if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) { return false; } return true; } } ); assertTrue(stores.length == 1); FileStatus[] storeFiles=fs.listStatus(stores[0].getPath()); assertTrue(storeFiles.length > 0); assertFalse(fs.exists(regionDir)); UTIL.deleteTable(TABLE_NAME); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Test that the region directory is removed when we archive a region without store files, but * still has hidden files. * @throws Exception */ @Test public void testDeleteRegionWithNoStoreFiles() throws Exception { TableName TABLE_NAME=TableName.valueOf("testDeleteRegionWithNoStoreFiles"); UTIL.createTable(TABLE_NAME,TEST_FAM); List servingRegions=UTIL.getHBaseCluster().getRegions(TABLE_NAME); assertEquals(1,servingRegions.size()); HRegion region=servingRegions.get(0); FileSystem fs=region.getRegionFileSystem().getFileSystem(); Path rootDir=FSUtils.getRootDir(fs.getConf()); Path regionDir=HRegion.getRegionDir(rootDir,region.getRegionInfo()); FileStatus[] regionFiles=FSUtils.listStatus(fs,regionDir,null); Assert.assertNotNull("No files in the region directory",regionFiles); if (LOG.isDebugEnabled()) { List files=new ArrayList(); for ( FileStatus file : regionFiles) { files.add(file.getPath()); } LOG.debug("Current files:" + files); } final PathFilter dirFilter=new FSUtils.DirFilter(fs); PathFilter nonHidden=new PathFilter(){ @Override public boolean accept( Path file){ return dirFilter.accept(file) && !file.getName().toString().startsWith("."); } } ; FileStatus[] storeDirs=FSUtils.listStatus(fs,regionDir,nonHidden); for ( FileStatus store : storeDirs) { LOG.debug("Deleting store for test"); fs.delete(store.getPath(),true); } HFileArchiver.archiveRegion(UTIL.getConfiguration(),fs,region.getRegionInfo()); assertFalse("Region directory (" + regionDir + "), still exists.",fs.exists(regionDir)); UTIL.deleteTable(TABLE_NAME); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/** * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643 */ @Test public void testCleaningRace() throws Exception { final long TEST_TIME=20 * 1000; final ChoreService choreService=new ChoreService("TEST_SERVER_NAME"); Configuration conf=UTIL.getMiniHBaseCluster().getMaster().getConfiguration(); Path rootDir=UTIL.getDataTestDirOnTestFS("testCleaningRace"); FileSystem fs=UTIL.getTestFileSystem(); Path archiveDir=new Path(rootDir,HConstants.HFILE_ARCHIVE_DIRECTORY); Path regionDir=new Path(FSUtils.getTableDir(new Path("./"),TableName.valueOf("table")),"abcdef"); Path familyDir=new Path(regionDir,"cf"); Path sourceRegionDir=new Path(rootDir,regionDir); fs.mkdirs(sourceRegionDir); Stoppable stoppable=new StoppableImplementation(); HFileCleaner cleaner=new HFileCleaner(1,stoppable,conf,fs,archiveDir); try { choreService.scheduleChore(cleaner); long startTime=System.currentTimeMillis(); for (long fid=0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) { Path file=new Path(familyDir,String.valueOf(fid)); Path sourceFile=new Path(rootDir,file); Path archiveFile=new Path(archiveDir,file); fs.createNewFile(sourceFile); try { HFileArchiver.archiveRegion(fs,rootDir,sourceRegionDir.getParent(),sourceRegionDir); LOG.debug("hfile=" + fid + " should be in the archive"); assertTrue(fs.exists(archiveFile)); assertFalse(fs.exists(sourceFile)); } catch ( IOException e) { LOG.debug("hfile=" + fid + " should be in the source location"); assertFalse(fs.exists(archiveFile)); assertTrue(fs.exists(sourceFile)); fs.delete(sourceFile,false); } } } finally { stoppable.stop("test end"); cleaner.cancel(true); choreService.shutdown(); fs.delete(rootDir,true); } }

Class: org.apache.hadoop.hbase.backup.example.TestZooKeeperTableArchiveClient

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test archiving/cleaning across multiple tables, where some are retained
 * (long-term archiving enabled) and others aren't.
 * @throws Exception on failure
 */
@Test(timeout=300000) public void testMultipleTables() throws Exception {
  createArchiveDirectory();
  String otherTable="otherTable";
  FileSystem fs=UTIL.getTestFileSystem();
  Path archiveDir=getArchiveDir();
  Path tableDir=getTableDir(STRING_TABLE_NAME);
  Path otherTableDir=getTableDir(otherTable);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  toCleanup.add(otherTableDir);
  Configuration conf=UTIL.getConfiguration();
  Stoppable stop=new StoppableImplementation();
  final ChoreService choreService=new ChoreService("TEST_SERVER_NAME");
  HFileCleaner cleaner=setupAndCreateCleaner(conf,fs,archiveDir,stop);
  // Long-term archiving is enabled for STRING_TABLE_NAME only.
  List cleaners=turnOnArchiving(STRING_TABLE_NAME,cleaner);
  final LongTermArchivingHFileCleaner delegate=(LongTermArchivingHFileCleaner)cleaners.get(0);
  // Generate archived files for the archived table.
  HColumnDescriptor hcd=new HColumnDescriptor(TEST_FAM);
  HRegion region=UTIL.createTestRegion(STRING_TABLE_NAME,hcd);
  List regions=new ArrayList();
  regions.add(region);
  when(rss.getOnlineRegions()).thenReturn(regions);
  final CompactedHFilesDischarger compactionCleaner=new CompactedHFilesDischarger(100,stop,rss,false);
  loadFlushAndCompact(region,TEST_FAM);
  compactionCleaner.chore();
  // Generate archived files for the NON-archived table.
  hcd=new HColumnDescriptor(TEST_FAM);
  HRegion otherRegion=UTIL.createTestRegion(otherTable,hcd);
  regions=new ArrayList();
  regions.add(otherRegion);
  when(rss.getOnlineRegions()).thenReturn(regions);
  final CompactedHFilesDischarger compactionCleaner1=new CompactedHFilesDischarger(100,stop,rss,false);
  loadFlushAndCompact(otherRegion,TEST_FAM);
  compactionCleaner1.chore();
  List files=getAllFiles(fs,archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs,archiveDir,LOG);
    throw new RuntimeException("Didn't load archive any files!");
  }
  // Count the archived files per table before the cleaner runs.
  int initialCountForPrimary=0;
  int initialCountForOtherTable=0;
  for ( Path file : files) {
    String tableName=file.getParent().getParent().getParent().getName();
    if (tableName.equals(otherTable)) initialCountForOtherTable++;
 else if (tableName.equals(STRING_TABLE_NAME)) initialCountForPrimary++;
  }
  assertTrue("Didn't archive files for:" + STRING_TABLE_NAME,initialCountForPrimary > 0);
  assertTrue("Didn't archive files for:" + otherTable,initialCountForOtherTable > 0);
  // Run the cleaner chore until it has examined all files (plus slack).
  CountDownLatch finished=setupCleanerWatching(delegate,cleaners,files.size() + 3);
  choreService.scheduleChore(cleaner);
  finished.await();
  stop.stop("");
  // After cleaning: non-archived table's files are gone, the archived
  // table's files are all retained.
  List archivedFiles=getAllFiles(fs,archiveDir);
  int archivedForPrimary=0;
  for ( Path file : archivedFiles) {
    String tableName=file.getParent().getParent().getParent().getName();
    assertFalse("Have a file from the non-archived table: " + file,tableName.equals(otherTable));
    if (tableName.equals(STRING_TABLE_NAME)) archivedForPrimary++;
  }
  assertEquals("Not all archived files for the primary table were retained.",initialCountForPrimary,archivedForPrimary);
  assertTrue("Archive directory was deleted via archiver",fs.exists(archiveDir));
}

InternalCallVerifier BooleanVerifier 
/** * Test turning on/off archiving */ @Test(timeout=300000) public void testArchivingEnableDisable() throws Exception { LOG.debug("----Starting archiving"); archivingClient.enableHFileBackupAsync(TABLE_NAME); assertTrue("Archving didn't get turned on",archivingClient.getArchivingEnabled(TABLE_NAME)); archivingClient.disableHFileBackup(); assertFalse("Archving didn't get turned off.",archivingClient.getArchivingEnabled(TABLE_NAME)); archivingClient.enableHFileBackupAsync(TABLE_NAME); assertTrue("Archving didn't get turned on",archivingClient.getArchivingEnabled(TABLE_NAME)); archivingClient.disableHFileBackup(TABLE_NAME); assertFalse("Archving didn't get turned off for " + STRING_TABLE_NAME,archivingClient.getArchivingEnabled(TABLE_NAME)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=300000) public void testArchivingOnSingleTable() throws Exception { createArchiveDirectory(); FileSystem fs=UTIL.getTestFileSystem(); Path archiveDir=getArchiveDir(); Path tableDir=getTableDir(STRING_TABLE_NAME); toCleanup.add(archiveDir); toCleanup.add(tableDir); Configuration conf=UTIL.getConfiguration(); Stoppable stop=new StoppableImplementation(); HFileCleaner cleaner=setupAndCreateCleaner(conf,fs,archiveDir,stop); List cleaners=turnOnArchiving(STRING_TABLE_NAME,cleaner); final LongTermArchivingHFileCleaner delegate=(LongTermArchivingHFileCleaner)cleaners.get(0); HColumnDescriptor hcd=new HColumnDescriptor(TEST_FAM); HRegion region=UTIL.createTestRegion(STRING_TABLE_NAME,hcd); List regions=new ArrayList(); regions.add(region); when(rss.getOnlineRegions()).thenReturn(regions); final CompactedHFilesDischarger compactionCleaner=new CompactedHFilesDischarger(100,stop,rss,false); loadFlushAndCompact(region,TEST_FAM); compactionCleaner.chore(); List files=getAllFiles(fs,archiveDir); if (files == null) { FSUtils.logFileSystemState(fs,UTIL.getDataTestDir(),LOG); throw new RuntimeException("Didn't archive any files!"); } CountDownLatch finished=setupCleanerWatching(delegate,cleaners,files.size()); runCleaner(cleaner,finished,stop); List archivedFiles=getAllFiles(fs,archiveDir); assertEquals("Archived files changed after running archive cleaner.",files,archivedFiles); assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration()))); }

Class: org.apache.hadoop.hbase.client.TestAdmin1

InternalCallVerifier BooleanVerifier 
@Test(timeout=300000) public void testTableAvailableWithRandomSplitKeys() throws Exception { TableName tableName=TableName.valueOf("testTableAvailableWithRandomSplitKeys"); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor("col")); byte[][] splitKeys=new byte[1][]; splitKeys=new byte[][]{new byte[]{1,1,1},new byte[]{2,2,2}}; admin.createTable(desc); boolean tableAvailable=admin.isTableAvailable(tableName,splitKeys); assertFalse("Table should be created with 1 row in META",tableAvailable); }

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Verify schema modification takes effect while the table stays online:
 * table-level changes, column-family modification, family add and family
 * delete must all succeed without disabling the table.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout=300000) public void testOnlineChangeTableSchema() throws IOException, InterruptedException {
  final TableName tableName=TableName.valueOf("changeTableSchemaOnline");
  // Online schema updates must be enabled for this test to be meaningful.
  TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean("hbase.online.schema.update.enable",true);
  HTableDescriptor[] tables=admin.listTables();
  int numTables=tables.length;
  TEST_UTIL.createTable(tableName,HConstants.CATALOG_FAMILY).close();
  tables=this.admin.listTables();
  assertEquals(numTables + 1,tables.length);
  // Modify a copy of the live descriptor: halve the flush size and add a value.
  HTableDescriptor htd=this.admin.getTableDescriptor(tableName);
  HTableDescriptor copy=new HTableDescriptor(htd);
  assertTrue(htd.equals(copy));
  long newFlushSize=htd.getMemStoreFlushSize() / 2;
  if (newFlushSize <= 0) {
    newFlushSize=HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
  }
  copy.setMemStoreFlushSize(newFlushSize);
  final String key="anyoldkey";
  assertTrue(htd.getValue(key) == null);
  copy.setValue(key,key);
  // modifyTable on an ENABLED table must not throw TableNotDisabledException.
  boolean expectedException=false;
  try {
    admin.modifyTable(tableName,copy);
  } catch ( TableNotDisabledException re) {
    expectedException=true;
  }
  assertFalse(expectedException);
  HTableDescriptor modifiedHtd=this.admin.getTableDescriptor(tableName);
  assertFalse(htd.equals(modifiedHtd));
  assertTrue(copy.equals(modifiedHtd));
  assertEquals(newFlushSize,modifiedHtd.getMemStoreFlushSize());
  assertEquals(key,modifiedHtd.getValue(key));
  // Modify an existing column family online: bump max versions.
  int countOfFamilies=modifiedHtd.getFamilies().size();
  assertTrue(countOfFamilies > 0);
  HColumnDescriptor hcd=modifiedHtd.getFamilies().iterator().next();
  int maxversions=hcd.getMaxVersions();
  final int newMaxVersions=maxversions + 1;
  hcd.setMaxVersions(newMaxVersions);
  final byte[] hcdName=hcd.getName();
  expectedException=false;
  try {
    this.admin.modifyColumnFamily(tableName,hcd);
  } catch ( TableNotDisabledException re) {
    expectedException=true;
  }
  assertFalse(expectedException);
  modifiedHtd=this.admin.getTableDescriptor(tableName);
  HColumnDescriptor modifiedHcd=modifiedHtd.getFamily(hcdName);
  assertEquals(newMaxVersions,modifiedHcd.getMaxVersions());
  // Add a new column family online; the table must stay enabled throughout.
  assertFalse(this.admin.isTableDisabled(tableName));
  final String xtracolName="xtracol";
  HColumnDescriptor xtracol=new HColumnDescriptor(xtracolName);
  xtracol.setValue(xtracolName,xtracolName);
  expectedException=false;
  try {
    this.admin.addColumnFamily(tableName,xtracol);
  } catch ( TableNotDisabledException re) {
    expectedException=true;
  }
  assertFalse(expectedException);
  modifiedHtd=this.admin.getTableDescriptor(tableName);
  hcd=modifiedHtd.getFamily(xtracol.getName());
  assertTrue(hcd != null);
  assertTrue(hcd.getValue(xtracolName).equals(xtracolName));
  // Delete the extra family online and confirm it is gone.
  this.admin.deleteColumnFamily(tableName,xtracol.getName());
  modifiedHtd=this.admin.getTableDescriptor(tableName);
  hcd=modifiedHtd.getFamily(xtracol.getName());
  assertTrue(hcd == null);
  // Clean up: delete the table and confirm it no longer exists.
  this.admin.disableTable(tableName);
  this.admin.deleteTable(tableName);
  this.admin.listTables();
  assertFalse(this.admin.tableExists(tableName));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=300000) public void testCompactionTimestamps() throws Exception { HColumnDescriptor fam1=new HColumnDescriptor("fam1"); TableName tableName=TableName.valueOf("testCompactionTimestampsTable"); HTableDescriptor htd=new HTableDescriptor(tableName); htd.addFamily(fam1); this.admin.createTable(htd); Table table=TEST_UTIL.getConnection().getTable(htd.getTableName()); long ts=this.admin.getLastMajorCompactionTimestamp(tableName); assertEquals(0,ts); Put p=new Put(Bytes.toBytes("row1")); p.addColumn(Bytes.toBytes("fam1"),Bytes.toBytes("fam1"),Bytes.toBytes("fam1")); table.put(p); ts=this.admin.getLastMajorCompactionTimestamp(tableName); assertEquals(0,ts); this.admin.flush(tableName); ts=this.admin.getLastMajorCompactionTimestamp(tableName); assertEquals(0,ts); byte[] regionName; try (RegionLocator l=TEST_UTIL.getConnection().getRegionLocator(tableName)){ regionName=l.getAllRegionLocations().get(0).getRegionInfo().getRegionName(); } long ts1=this.admin.getLastMajorCompactionTimestampForRegion(regionName); assertEquals(ts,ts1); p=new Put(Bytes.toBytes("row2")); p.addColumn(Bytes.toBytes("fam1"),Bytes.toBytes("fam1"),Bytes.toBytes("fam1")); table.put(p); this.admin.flush(tableName); ts=this.admin.getLastMajorCompactionTimestamp(tableName); assertEquals(ts1,ts); TEST_UTIL.compact(tableName,true); table.put(p); this.admin.flush(tableName); ts=this.admin.getLastMajorCompactionTimestamp(tableName); assertTrue(ts > ts1); ts1=this.admin.getLastMajorCompactionTimestampForRegion(regionName); assertEquals(ts,ts1); table.put(p); this.admin.flush(tableName); ts=this.admin.getLastMajorCompactionTimestamp(tableName); assertEquals(ts,ts1); table.close(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/** * Test retain assignment on enableTable. * @throws IOException */ @Test(timeout=300000) public void testEnableTableRetainAssignment() throws IOException { final TableName tableName=TableName.valueOf("testEnableTableAssignment"); byte[][] splitKeys={new byte[]{1,1,1},new byte[]{2,2,2},new byte[]{3,3,3},new byte[]{4,4,4},new byte[]{5,5,5},new byte[]{6,6,6},new byte[]{7,7,7},new byte[]{8,8,8},new byte[]{9,9,9}}; int expectedRegions=splitKeys.length + 1; HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc,splitKeys); try (RegionLocator l=TEST_UTIL.getConnection().getRegionLocator(tableName)){ List regions=l.getAllRegionLocations(); assertEquals("Tried to create " + expectedRegions + " regions "+ "but only found "+ regions.size(),expectedRegions,regions.size()); admin.disableTable(tableName); admin.enableTable(tableName); List regions2=l.getAllRegionLocations(); assertEquals(regions.size(),regions2.size()); assertTrue(regions2.containsAll(regions)); } }

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test(timeout=300000) public void testCreateTable() throws IOException { HTableDescriptor[] tables=admin.listTables(); int numTables=tables.length; TableName tableName=TableName.valueOf("testCreateTable"); TEST_UTIL.createTable(tableName,HConstants.CATALOG_FAMILY).close(); tables=this.admin.listTables(); assertEquals(numTables + 1,tables.length); assertTrue("Table must be enabled.",TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getTableStateManager().isTableState(tableName,TableState.State.ENABLED)); assertEquals(TableState.State.ENABLED,getStateFromMeta(tableName)); }

InternalCallVerifier EqualityVerifier 
@Test(timeout=300000) public void testGetTableDescriptor() throws IOException { HColumnDescriptor fam1=new HColumnDescriptor("fam1"); HColumnDescriptor fam2=new HColumnDescriptor("fam2"); HColumnDescriptor fam3=new HColumnDescriptor("fam3"); HTableDescriptor htd=new HTableDescriptor(TableName.valueOf("myTestTable")); htd.addFamily(fam1); htd.addFamily(fam2); htd.addFamily(fam3); this.admin.createTable(htd); Table table=TEST_UTIL.getConnection().getTable(htd.getTableName()); HTableDescriptor confirmedHtd=table.getTableDescriptor(); assertEquals(htd.compareTo(confirmedHtd),0); MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection()); table.close(); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=300000) public void testDisableAndEnableTable() throws IOException { final byte[] row=Bytes.toBytes("row"); final byte[] qualifier=Bytes.toBytes("qualifier"); final byte[] value=Bytes.toBytes("value"); final TableName table=TableName.valueOf("testDisableAndEnableTable"); Table ht=TEST_UTIL.createTable(table,HConstants.CATALOG_FAMILY); Put put=new Put(row); put.addColumn(HConstants.CATALOG_FAMILY,qualifier,value); ht.put(put); Get get=new Get(row); get.addColumn(HConstants.CATALOG_FAMILY,qualifier); ht.get(get); this.admin.disableTable(ht.getName()); assertTrue("Table must be disabled.",TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getTableStateManager().isTableState(ht.getName(),TableState.State.DISABLED)); assertEquals(TableState.State.DISABLED,getStateFromMeta(table)); get=new Get(row); get.addColumn(HConstants.CATALOG_FAMILY,qualifier); boolean ok=false; try { ht.get(get); } catch ( TableNotEnabledException e) { ok=true; } ok=false; Scan scan=new Scan(); try { ResultScanner scanner=ht.getScanner(scan); Result res=null; do { res=scanner.next(); } while (res != null); } catch ( TableNotEnabledException e) { ok=true; } assertTrue(ok); this.admin.enableTable(table); assertTrue("Table must be enabled.",TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getTableStateManager().isTableState(ht.getName(),TableState.State.ENABLED)); assertEquals(TableState.State.ENABLED,getStateFromMeta(table)); try { ht.get(get); } catch ( RetriesExhaustedException e) { ok=false; } assertTrue(ok); ht.close(); }

InternalCallVerifier BooleanVerifier 
@Test(timeout=300000) public void testHFileReplication() throws Exception { TableName name=TableName.valueOf("testHFileReplication"); String fn1="rep1"; HColumnDescriptor hcd1=new HColumnDescriptor(fn1); hcd1.setDFSReplication((short)1); String fn="defaultRep"; HColumnDescriptor hcd=new HColumnDescriptor(fn); HTableDescriptor htd=new HTableDescriptor(name); htd.addFamily(hcd); htd.addFamily(hcd1); Table table=TEST_UTIL.createTable(htd,null); TEST_UTIL.waitTableAvailable(name); Put p=new Put(Bytes.toBytes("defaultRep_rk")); byte[] q1=Bytes.toBytes("q1"); byte[] v1=Bytes.toBytes("v1"); p.addColumn(Bytes.toBytes(fn),q1,v1); List puts=new ArrayList(2); puts.add(p); p=new Put(Bytes.toBytes("rep1_rk")); p.addColumn(Bytes.toBytes(fn1),q1,v1); puts.add(p); try { table.put(puts); admin.flush(name); List regions=TEST_UTIL.getMiniHBaseCluster().getRegions(name); for ( HRegion r : regions) { Store store=r.getStore(Bytes.toBytes(fn)); for ( StoreFile sf : store.getStorefiles()) { assertTrue(sf.toString().contains(fn)); assertTrue("Column family " + fn + " should have 3 copies",FSUtils.getDefaultReplication(TEST_UTIL.getTestFileSystem(),sf.getPath()) == (sf.getFileInfo().getFileStatus().getReplication())); } store=r.getStore(Bytes.toBytes(fn1)); for ( StoreFile sf : store.getStorefiles()) { assertTrue(sf.toString().contains(fn1)); assertTrue("Column family " + fn1 + " should have only 1 copy",1 == sf.getFileInfo().getFileStatus().getReplication()); } } } finally { if (admin.isTableEnabled(name)) { this.admin.disableTable(name); this.admin.deleteTable(name); } } }

InternalCallVerifier EqualityVerifier 
@Test(timeout=120000) public void testTableExist() throws IOException { final TableName table=TableName.valueOf("testTableExist"); boolean exist; exist=this.admin.tableExists(table); assertEquals(false,exist); TEST_UTIL.createTable(table,HConstants.CATALOG_FAMILY); exist=this.admin.tableExists(table); assertEquals(true,exist); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// NOTE(review): covers the region-count contract of the createTable overloads: no
// split keys -> 1 region; one explicit split key -> 2 regions; (startKey, endKey, 3)
// -> 3 regions; (startKey, endKey, 2) must throw IllegalArgumentException; a 16-way
// presplit over {1}..{127} -> 16 regions. Each count is read back through a
// RegionLocator opened in try-with-resources.
@Test(timeout=300000) public void testCreateTableNumberOfRegions() throws IOException, InterruptedException { TableName tableName=TableName.valueOf("testCreateTableNumberOfRegions"); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc); List regions; try (RegionLocator l=TEST_UTIL.getConnection().getRegionLocator(tableName)){ regions=l.getAllRegionLocations(); assertEquals("Table should have only 1 region",1,regions.size()); } TableName TABLE_2=TableName.valueOf(tableName.getNameAsString() + "_2"); desc=new HTableDescriptor(TABLE_2); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc,new byte[][]{new byte[]{42}}); try (RegionLocator l=TEST_UTIL.getConnection().getRegionLocator(TABLE_2)){ regions=l.getAllRegionLocations(); assertEquals("Table should have only 2 region",2,regions.size()); } TableName TABLE_3=TableName.valueOf(tableName.getNameAsString() + "_3"); desc=new HTableDescriptor(TABLE_3); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc,"a".getBytes(),"z".getBytes(),3); try (RegionLocator l=TEST_UTIL.getConnection().getRegionLocator(TABLE_3)){ regions=l.getAllRegionLocations(); assertEquals("Table should have only 3 region",3,regions.size()); } TableName TABLE_4=TableName.valueOf(tableName.getNameAsString() + "_4"); desc=new HTableDescriptor(TABLE_4); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); try { admin.createTable(desc,"a".getBytes(),"z".getBytes(),2); fail("Should not be able to create a table with only 2 regions using this API."); } catch ( IllegalArgumentException eae) { } TableName TABLE_5=TableName.valueOf(tableName.getNameAsString() + "_5"); desc=new HTableDescriptor(TABLE_5); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc,new byte[]{1},new byte[]{127},16); try (RegionLocator 
l=TEST_UTIL.getConnection().getRegionLocator(TABLE_5)){ regions=l.getAllRegionLocations(); assertEquals("Table should have 16 region",16,regions.size()); } }

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test(timeout=300000) public void testShouldFailOnlineSchemaUpdateIfOnlineSchemaIsNotEnabled() throws Exception { final TableName tableName=TableName.valueOf("changeTableSchemaOnlineFailure"); TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean("hbase.online.schema.update.enable",false); HTableDescriptor[] tables=admin.listTables(); int numTables=tables.length; TEST_UTIL.createTable(tableName,HConstants.CATALOG_FAMILY).close(); tables=this.admin.listTables(); assertEquals(numTables + 1,tables.length); HTableDescriptor htd=this.admin.getTableDescriptor(tableName); HTableDescriptor copy=new HTableDescriptor(htd); assertTrue(htd.equals(copy)); long newFlushSize=htd.getMemStoreFlushSize() / 2; if (newFlushSize <= 0) { newFlushSize=HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2; } copy.setMemStoreFlushSize(newFlushSize); final String key="anyoldkey"; assertTrue(htd.getValue(key) == null); copy.setValue(key,key); boolean expectedException=false; try { admin.modifyTable(tableName,copy); } catch ( TableNotDisabledException re) { expectedException=true; } assertTrue("Online schema update should not happen.",expectedException); TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean("hbase.online.schema.update.enable",true); }

InternalCallVerifier EqualityVerifier 
@Test public void testMergeRegions() throws Exception { TableName tableName=TableName.valueOf("testMergeWithFullRegionName"); HColumnDescriptor cd=new HColumnDescriptor("d"); HTableDescriptor td=new HTableDescriptor(tableName); td.addFamily(cd); byte[][] splitRows=new byte[2][]; splitRows[0]=new byte[]{(byte)'3'}; splitRows[1]=new byte[]{(byte)'6'}; try { TEST_UTIL.createTable(td,splitRows); TEST_UTIL.waitTableAvailable(tableName); List tableRegions; HRegionInfo regionA; HRegionInfo regionB; tableRegions=admin.getTableRegions(tableName); assertEquals(3,admin.getTableRegions(tableName).size()); regionA=tableRegions.get(0); regionB=tableRegions.get(1); admin.mergeRegions(regionA.getRegionName(),regionB.getRegionName(),false); Thread.sleep(1000); assertEquals(2,admin.getTableRegions(tableName).size()); tableRegions=admin.getTableRegions(tableName); regionA=tableRegions.get(0); regionB=tableRegions.get(1); admin.mergeRegions(regionA.getEncodedNameAsBytes(),regionB.getEncodedNameAsBytes(),false); Thread.sleep(1000); assertEquals(1,admin.getTableRegions(tableName).size()); } finally { this.admin.disableTable(tableName); this.admin.deleteTable(tableName); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// NOTE(review): exhaustive split-key verification across createTable overloads.
// (1) Explicit splitKeys -> splitKeys.length + 1 regions whose start/end keys chain
//     exactly through the split points (first start and last end are empty).
// (2) (startKey, endKey, 10) -> 10 regions with evenly spaced {1,1,...} .. {9,9,...}
//     boundaries, likewise checked key by key.
// (3) A 5-region create is checked for count only.
// (4) Duplicate split keys must raise IllegalArgumentException.
// After each successful create, verifyRoundRobinDistribution checks placement.
@Test(timeout=300000) public void testCreateTableWithRegions() throws IOException, InterruptedException { TableName tableName=TableName.valueOf("testCreateTableWithRegions"); byte[][] splitKeys={new byte[]{1,1,1},new byte[]{2,2,2},new byte[]{3,3,3},new byte[]{4,4,4},new byte[]{5,5,5},new byte[]{6,6,6},new byte[]{7,7,7},new byte[]{8,8,8},new byte[]{9,9,9}}; int expectedRegions=splitKeys.length + 1; HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc,splitKeys); boolean tableAvailable=admin.isTableAvailable(tableName,splitKeys); assertTrue("Table should be created with splitKyes + 1 rows in META",tableAvailable); List regions; Iterator hris; HRegionInfo hri; ClusterConnection conn=(ClusterConnection)TEST_UTIL.getConnection(); try (RegionLocator l=TEST_UTIL.getConnection().getRegionLocator(tableName)){ regions=l.getAllRegionLocations(); assertEquals("Tried to create " + expectedRegions + " regions "+ "but only found "+ regions.size(),expectedRegions,regions.size()); System.err.println("Found " + regions.size() + " regions"); hris=regions.iterator(); hri=hris.next().getRegionInfo(); assertTrue(hri.getStartKey() == null || hri.getStartKey().length == 0); assertTrue(Bytes.equals(hri.getEndKey(),splitKeys[0])); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),splitKeys[0])); assertTrue(Bytes.equals(hri.getEndKey(),splitKeys[1])); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),splitKeys[1])); assertTrue(Bytes.equals(hri.getEndKey(),splitKeys[2])); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),splitKeys[2])); assertTrue(Bytes.equals(hri.getEndKey(),splitKeys[3])); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),splitKeys[3])); assertTrue(Bytes.equals(hri.getEndKey(),splitKeys[4])); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),splitKeys[4])); 
assertTrue(Bytes.equals(hri.getEndKey(),splitKeys[5])); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),splitKeys[5])); assertTrue(Bytes.equals(hri.getEndKey(),splitKeys[6])); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),splitKeys[6])); assertTrue(Bytes.equals(hri.getEndKey(),splitKeys[7])); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),splitKeys[7])); assertTrue(Bytes.equals(hri.getEndKey(),splitKeys[8])); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),splitKeys[8])); assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0); verifyRoundRobinDistribution(conn,l,expectedRegions); } byte[] startKey={1,1,1,1,1,1,1,1,1,1}; byte[] endKey={9,9,9,9,9,9,9,9,9,9}; expectedRegions=10; TableName TABLE_2=TableName.valueOf(tableName.getNameAsString() + "_2"); desc=new HTableDescriptor(TABLE_2); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin=TEST_UTIL.getHBaseAdmin(); admin.createTable(desc,startKey,endKey,expectedRegions); try (RegionLocator l=TEST_UTIL.getConnection().getRegionLocator(TABLE_2)){ regions=l.getAllRegionLocations(); assertEquals("Tried to create " + expectedRegions + " regions "+ "but only found "+ regions.size(),expectedRegions,regions.size()); System.err.println("Found " + regions.size() + " regions"); hris=regions.iterator(); hri=hris.next().getRegionInfo(); assertTrue(hri.getStartKey() == null || hri.getStartKey().length == 0); assertTrue(Bytes.equals(hri.getEndKey(),new byte[]{1,1,1,1,1,1,1,1,1,1})); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),new byte[]{1,1,1,1,1,1,1,1,1,1})); assertTrue(Bytes.equals(hri.getEndKey(),new byte[]{2,2,2,2,2,2,2,2,2,2})); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),new byte[]{2,2,2,2,2,2,2,2,2,2})); assertTrue(Bytes.equals(hri.getEndKey(),new byte[]{3,3,3,3,3,3,3,3,3,3})); hri=hris.next().getRegionInfo(); 
assertTrue(Bytes.equals(hri.getStartKey(),new byte[]{3,3,3,3,3,3,3,3,3,3})); assertTrue(Bytes.equals(hri.getEndKey(),new byte[]{4,4,4,4,4,4,4,4,4,4})); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),new byte[]{4,4,4,4,4,4,4,4,4,4})); assertTrue(Bytes.equals(hri.getEndKey(),new byte[]{5,5,5,5,5,5,5,5,5,5})); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),new byte[]{5,5,5,5,5,5,5,5,5,5})); assertTrue(Bytes.equals(hri.getEndKey(),new byte[]{6,6,6,6,6,6,6,6,6,6})); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),new byte[]{6,6,6,6,6,6,6,6,6,6})); assertTrue(Bytes.equals(hri.getEndKey(),new byte[]{7,7,7,7,7,7,7,7,7,7})); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),new byte[]{7,7,7,7,7,7,7,7,7,7})); assertTrue(Bytes.equals(hri.getEndKey(),new byte[]{8,8,8,8,8,8,8,8,8,8})); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),new byte[]{8,8,8,8,8,8,8,8,8,8})); assertTrue(Bytes.equals(hri.getEndKey(),new byte[]{9,9,9,9,9,9,9,9,9,9})); hri=hris.next().getRegionInfo(); assertTrue(Bytes.equals(hri.getStartKey(),new byte[]{9,9,9,9,9,9,9,9,9,9})); assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0); verifyRoundRobinDistribution(conn,l,expectedRegions); } startKey=new byte[]{0,0,0,0,0,0}; endKey=new byte[]{1,0,0,0,0,0}; expectedRegions=5; TableName TABLE_3=TableName.valueOf(tableName.getNameAsString() + "_3"); desc=new HTableDescriptor(TABLE_3); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin=TEST_UTIL.getHBaseAdmin(); admin.createTable(desc,startKey,endKey,expectedRegions); try (RegionLocator l=TEST_UTIL.getConnection().getRegionLocator(TABLE_3)){ regions=l.getAllRegionLocations(); assertEquals("Tried to create " + expectedRegions + " regions "+ "but only found "+ regions.size(),expectedRegions,regions.size()); System.err.println("Found " + regions.size() + " regions"); 
verifyRoundRobinDistribution(conn,l,expectedRegions); } splitKeys=new byte[][]{new byte[]{1,1,1},new byte[]{2,2,2},new byte[]{3,3,3},new byte[]{2,2,2}}; TableName TABLE_4=TableName.valueOf(tableName.getNameAsString() + "_4"); desc=new HTableDescriptor(TABLE_4); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); try { admin.createTable(desc,splitKeys); assertTrue("Should not be able to create this table because of " + "duplicate split keys",false); } catch ( IllegalArgumentException iae) { } }

Class: org.apache.hadoop.hbase.client.TestAdmin2

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test(timeout=300000) public void testGetRegion() throws Exception { HBaseAdmin rawAdmin=TEST_UTIL.getHBaseAdmin(); final TableName tableName=TableName.valueOf("testGetRegion"); LOG.info("Started " + tableName); Table t=TEST_UTIL.createMultiRegionTable(tableName,HConstants.CATALOG_FAMILY); try (RegionLocator locator=TEST_UTIL.getConnection().getRegionLocator(tableName)){ HRegionLocation regionLocation=locator.getRegionLocation(Bytes.toBytes("mmm")); HRegionInfo region=regionLocation.getRegionInfo(); byte[] regionName=region.getRegionName(); Pair pair=rawAdmin.getRegion(regionName); assertTrue(Bytes.equals(regionName,pair.getFirst().getRegionName())); pair=rawAdmin.getRegion(region.getEncodedNameAsBytes()); assertTrue(Bytes.equals(regionName,pair.getFirst().getRegionName())); } }

InternalCallVerifier EqualityVerifier 
/** * For HBASE-2556 * @throws IOException */ @Test(timeout=300000) public void testGetTableRegions() throws IOException { final TableName tableName=TableName.valueOf("testGetTableRegions"); int expectedRegions=10; byte[] startKey={1,1,1,1,1,1,1,1,1,1}; byte[] endKey={9,9,9,9,9,9,9,9,9,9}; HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc,startKey,endKey,expectedRegions); List RegionInfos=admin.getTableRegions(tableName); assertEquals("Tried to create " + expectedRegions + " regions "+ "but only found "+ RegionInfos.size(),expectedRegions,RegionInfos.size()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test(timeout=300000) public void testCloseRegionWhenEncodedRegionNameIsNotGiven() throws Exception { byte[] TABLENAME=Bytes.toBytes("TestHBACloseRegion4"); createTableWithDefaultConf(TABLENAME); HRegionInfo info=null; HRegionServer rs=TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME)); List onlineRegions=ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()); for ( HRegionInfo regionInfo : onlineRegions) { if (!regionInfo.isMetaTable()) { if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion4")) { info=regionInfo; try { admin.closeRegionWithEncodedRegionName(regionInfo.getRegionNameAsString(),rs.getServerName().getServerName()); } catch ( NotServingRegionException nsre) { } } } } onlineRegions=ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()); assertTrue("The region should be present in online regions list.",onlineRegions.contains(info)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test(timeout=30000) public void testAbortProcedureFail() throws Exception { Random randomGenerator=new Random(); long procId=randomGenerator.nextLong(); boolean abortResult=admin.abortProcedure(procId,true); assertFalse(abortResult); }

InternalCallVerifier EqualityVerifier 
@Test(timeout=30000) public void testBalancer() throws Exception { boolean initialState=admin.isBalancerEnabled(); boolean prevState=admin.setBalancerRunning(!initialState,true); assertEquals(initialState,prevState); assertEquals(!initialState,admin.isBalancerEnabled()); prevState=admin.setBalancerRunning(initialState,true); assertEquals(!initialState,prevState); assertEquals(initialState,admin.isBalancerEnabled()); }

InternalCallVerifier EqualityVerifier 
@Test(timeout=30000) public void testRegionNormalizer() throws Exception { boolean initialState=admin.isNormalizerEnabled(); boolean prevState=admin.setNormalizerRunning(!initialState); assertEquals(initialState,prevState); assertEquals(!initialState,admin.isNormalizerEnabled()); prevState=admin.setNormalizerRunning(initialState); assertEquals(!initialState,prevState); assertEquals(initialState,admin.isNormalizerEnabled()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=300000) public void testMoveToPreviouslyAssignedRS() throws IOException, InterruptedException { MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); HMaster master=cluster.getMaster(); TableName tableName=TableName.valueOf("testMoveToPreviouslyAssignedRS"); Admin localAdmin=createTable(tableName); List tableRegions=localAdmin.getTableRegions(tableName); HRegionInfo hri=tableRegions.get(0); AssignmentManager am=master.getAssignmentManager(); assertTrue("Region " + hri.getRegionNameAsString() + " should be assigned properly",am.waitForAssignment(hri)); ServerName server=am.getRegionStates().getRegionServerOfRegion(hri); localAdmin.move(hri.getEncodedNameAsBytes(),Bytes.toBytes(server.getServerName())); assertEquals("Current region server and region server before move should be same.",server,am.getRegionStates().getRegionServerOfRegion(hri)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=300000) public void testCreateBadTables() throws IOException { String msg=null; try { this.admin.createTable(new HTableDescriptor(TableName.META_TABLE_NAME)); } catch ( TableExistsException e) { msg=e.toString(); } assertTrue("Unexcepted exception message " + msg,msg != null && msg.startsWith(TableExistsException.class.getName()) && msg.contains(TableName.META_TABLE_NAME.getNameAsString())); final HTableDescriptor threadDesc=new HTableDescriptor(TableName.valueOf("threaded_testCreateBadTables")); threadDesc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); int count=10; Thread[] threads=new Thread[count]; final AtomicInteger successes=new AtomicInteger(0); final AtomicInteger failures=new AtomicInteger(0); final Admin localAdmin=this.admin; for (int i=0; i < count; i++) { threads[i]=new Thread(Integer.toString(i)){ @Override public void run(){ try { localAdmin.createTable(threadDesc); successes.incrementAndGet(); } catch ( TableExistsException e) { failures.incrementAndGet(); } catch ( IOException e) { throw new RuntimeException("Failed threaded create" + getName(),e); } } } ; } for (int i=0; i < count; i++) { threads[i].start(); } for (int i=0; i < count; i++) { while (threads[i].isAlive()) { try { Thread.sleep(100); } catch ( InterruptedException e) { } } } assertEquals(1,successes.get()); assertEquals(count - 1,failures.get()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test(timeout=300000) public void testShouldCloseTheRegionBasedOnTheEncodedRegionName() throws Exception { TableName TABLENAME=TableName.valueOf("TestHBACloseRegion"); createTableWithDefaultConf(TABLENAME); HRegionInfo info=null; HRegionServer rs=TEST_UTIL.getRSForFirstRegionInTable(TABLENAME); List onlineRegions=ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()); for ( HRegionInfo regionInfo : onlineRegions) { if (!regionInfo.getTable().isSystemTable()) { info=regionInfo; admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(),rs.getServerName().getServerName()); } } boolean isInList=ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info); long timeout=System.currentTimeMillis() + 10000; while ((System.currentTimeMillis() < timeout) && (isInList)) { Thread.sleep(100); isInList=ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info); } assertFalse("The region should not be present in online regions list.",isInList); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test(timeout=300000) public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception { byte[] TABLENAME=Bytes.toBytes("TestHBACloseRegion1"); createTableWithDefaultConf(TABLENAME); HRegionInfo info=null; HRegionServer rs=TEST_UTIL.getRSForFirstRegionInTable(TableName.valueOf(TABLENAME)); List onlineRegions=ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()); for ( HRegionInfo regionInfo : onlineRegions) { if (!regionInfo.isMetaTable()) { if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion1")) { info=regionInfo; try { admin.closeRegionWithEncodedRegionName("sample",rs.getServerName().getServerName()); } catch ( NotServingRegionException nsre) { } } } } onlineRegions=ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()); assertTrue("The region should be present in online regions list.",onlineRegions.contains(info)); }

InternalCallVerifier BooleanVerifier 
@Test(timeout=300000) public void testListProcedures() throws Exception { ProcedureInfo[] procList=admin.listProcedures(); assertTrue(procList.length >= 0); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test(timeout=300000) public void testCloseRegionThatFetchesTheHRIFromMeta() throws Exception { TableName TABLENAME=TableName.valueOf("TestHBACloseRegion2"); createTableWithDefaultConf(TABLENAME); HRegionInfo info=null; HRegionServer rs=TEST_UTIL.getRSForFirstRegionInTable(TABLENAME); List onlineRegions=ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()); for ( HRegionInfo regionInfo : onlineRegions) { if (!regionInfo.isMetaTable()) { if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion2")) { info=regionInfo; admin.closeRegion(regionInfo.getRegionNameAsString(),rs.getServerName().getServerName()); } } } boolean isInList=ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info); long timeout=System.currentTimeMillis() + 10000; while ((System.currentTimeMillis() < timeout) && (isInList)) { Thread.sleep(100); isInList=ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info); } assertFalse("The region should not be present in online regions list.",isInList); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test(timeout=300000) public void testWALRollWriting() throws Exception { setUpforLogRolling(); String className=this.getClass().getName(); StringBuilder v=new StringBuilder(className); while (v.length() < 1000) { v.append(className); } byte[] value=Bytes.toBytes(v.toString()); HRegionServer regionServer=startAndWriteData(TableName.valueOf("TestLogRolling"),value); LOG.info("after writing there are " + DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files"); for ( Region r : regionServer.getOnlineRegionsLocalContext()) { r.flush(true); } admin.rollWALWriter(regionServer.getServerName()); int count=DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)); LOG.info("after flushing all regions and rolling logs there are " + count + " log files"); assertTrue(("actual count: " + count),count <= 2); }

Class: org.apache.hadoop.hbase.client.TestAsyncProcess

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testCallQueueTooLarge() throws IOException { ClusterConnection conn=new MyConnectionImpl(conf); BufferedMutatorImpl mutator=(BufferedMutatorImpl)conn.getBufferedMutator(DUMMY_TABLE); AsyncProcessWithFailure ap=new AsyncProcessWithFailure(conn,conf,new CallQueueTooBigException()); mutator.ap=ap; Assert.assertNotNull(mutator.ap.createServerErrorTracker()); Put p=createPut(1,true); mutator.mutate(p); try { mutator.flush(); Assert.fail(); } catch ( RetriesExhaustedWithDetailsException expected) { } Assert.assertEquals(NB_RETRIES + 1,ap.callsCt.get()); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
@Test public void testReplicaAllCallsFailForOneRegion() throws Exception { MyAsyncProcessWithReplicas ap=createReplicaAp(500,1000,0,0); ap.addFailures(hri1,hri1r1,hri1r2,hri2r1); List rows=makeTimelineGets(DUMMY_BYTES_1,DUMMY_BYTES_2); AsyncRequestFuture ars=ap.submitAll(DUMMY_TABLE,rows,null,new Object[2]); verifyReplicaResult(ars,RR.FAILED,RR.FALSE); Assert.assertEquals(3,ars.getErrors().getNumExceptions()); for (int i=0; i < ars.getErrors().getNumExceptions(); ++i) { Assert.assertArrayEquals(DUMMY_BYTES_1,ars.getErrors().getRow(i).getRow()); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testFail() throws Exception { MyAsyncProcess ap=new MyAsyncProcess(createHConnection(),conf,false); List puts=new ArrayList(); Put p=createPut(1,false); puts.add(p); AsyncRequestFuture ars=ap.submit(DUMMY_TABLE,puts,false,null,true); Assert.assertEquals(0,puts.size()); ars.waitUntilDone(); verifyResult(ars,false); Assert.assertEquals(NB_RETRIES + 1,ap.callsCt.get()); Assert.assertEquals(1,ars.getErrors().exceptions.size()); Assert.assertTrue("was: " + ars.getErrors().exceptions.get(0),failure.equals(ars.getErrors().exceptions.get(0))); Assert.assertTrue("was: " + ars.getErrors().exceptions.get(0),failure.equals(ars.getErrors().exceptions.get(0))); Assert.assertEquals(1,ars.getFailedOperations().size()); Assert.assertTrue("was: " + ars.getFailedOperations().get(0),p.equals(ars.getFailedOperations().get(0))); }

InternalCallVerifier EqualityVerifier 
/**
 * Two of the three gets are answered by replicas (TRUE) and one by its
 * primary (FALSE), for exactly two replica calls in total.
 */
@Test
public void testReplicaReplicaSuccess() throws Exception {
  MyAsyncProcessWithReplicas process = createReplicaAp(10, 1000, 0);
  List gets = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2, DUMMY_BYTES_3);
  AsyncRequestFuture future = process.submitAll(DUMMY_TABLE, gets, null, new Object[3]);
  verifyReplicaResult(future, RR.TRUE, RR.TRUE, RR.FALSE);
  Assert.assertEquals(2, process.getReplicaCallCount());
}

InternalCallVerifier EqualityVerifier 
/**
 * Even with selected primary/replica locations failing in parallel, both
 * gets complete via replicas (TRUE, TRUE) and exactly two replica calls
 * are issued.
 */
@Test
public void testReplicaReplicaSuccessWithParallelFailures() throws Exception {
  MyAsyncProcessWithReplicas process = createReplicaAp(0, 1000, 1000, 0);
  process.addFailures(hri1, hri1r2, hri2);
  List gets = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2);
  AsyncRequestFuture future = process.submitAll(DUMMY_TABLE, gets, null, new Object[2]);
  verifyReplicaResult(future, RR.TRUE, RR.TRUE);
  Assert.assertEquals(2, process.getReplicaCallCount());
}

InternalCallVerifier EqualityVerifier 
/**
 * A batch of one failing and two succeeding puts: the batch goes through the
 * full retry cycle and exactly one operation ends up in the failed list.
 */
@Test
public void testFlush() throws Exception {
  MyAsyncProcess process = new MyAsyncProcess(createHConnection(), conf, false);
  List batch = new ArrayList();
  batch.add(createPut(1, false));
  batch.add(createPut(1, true));
  batch.add(createPut(1, true));
  AsyncRequestFuture future = process.submit(DUMMY_TABLE, batch, false, null, true);
  future.waitUntilDone();
  verifyResult(future, false, true, true);
  // One initial attempt plus NB_RETRIES retries.
  Assert.assertEquals(NB_RETRIES + 1, process.callsCt.get());
  Assert.assertEquals(1, future.getFailedOperations().size());
}

InternalCallVerifier BooleanVerifier 
/**
 * With no delays configured, primary and replica calls race and either may
 * win, so the test only bounds the replica call count to [0, 2].
 */
@Test
public void testReplicaParallelCallsSucceed() throws Exception {
  MyAsyncProcessWithReplicas process = createReplicaAp(0, 0, 0);
  List gets = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2);
  AsyncRequestFuture future = process.submitAll(DUMMY_TABLE, gets, null, new Object[2]);
  verifyReplicaResult(future, RR.DONT_CARE, RR.DONT_CARE);
  long replicaCallCount = process.getReplicaCallCount();
  Assert.assertTrue(replicaCallCount >= 0);
  Assert.assertTrue(replicaCallCount <= 2);
}

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * With per-server retry accounting enabled and the server tracker timeout
 * forced down to 1 ms, a failing put still performs the full retry count
 * before surfacing RetriesExhaustedWithDetailsException.
 */
@Test
public void testErrorsServers() throws IOException {
  Configuration localConf = new Configuration(conf);
  ClusterConnection connection = new MyConnectionImpl(localConf);
  BufferedMutatorImpl bufferedMutator =
      new BufferedMutatorImpl(connection, null, null, new BufferedMutatorParams(DUMMY_TABLE));
  localConf.setBoolean(ConnectionImplementation.RETRIES_BY_SERVER_KEY, true);
  MyAsyncProcess process = new MyAsyncProcess(connection, localConf, true);
  bufferedMutator.ap = process;
  Assert.assertNotNull(bufferedMutator.ap.createServerErrorTracker());
  Assert.assertTrue(bufferedMutator.ap.serverTrackerTimeout > 200);
  // Shrink the tracker timeout so the retry loop cannot stall the test.
  bufferedMutator.ap.serverTrackerTimeout = 1;
  Put put = createPut(1, false);
  bufferedMutator.mutate(put);
  try {
    bufferedMutator.flush();
    Assert.fail();
  } catch (RetriesExhaustedWithDetailsException expected) {
  }
  Assert.assertEquals(NB_RETRIES + 1, process.callsCt.get());
}

InternalCallVerifier EqualityVerifier 
/**
 * When both primaries fail before any replica call is scheduled, both
 * results are FAILED and the replica call count stays zero.
 */
@Test
public void testReplicaMainFailsBeforeReplicaCalls() throws Exception {
  MyAsyncProcessWithReplicas process = createReplicaAp(1000, 0, 0, 0);
  process.addFailures(hri1, hri2);
  List gets = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2);
  AsyncRequestFuture future = process.submitAll(DUMMY_TABLE, gets, null, new Object[2]);
  verifyReplicaResult(future, RR.FAILED, RR.FAILED);
  Assert.assertEquals(0, process.getReplicaCallCount());
}

InternalCallVerifier EqualityVerifier 
/**
 * Only the get whose primary (on sn2) is artificially delayed falls back to
 * a replica (second result TRUE); the other is served by its primary, so
 * exactly one replica call is made.
 */
@Test
public void testReplicaPartialReplicaCall() throws Exception {
  MyAsyncProcessWithReplicas process = createReplicaAp(1000, 0, 0);
  process.setPrimaryCallDelay(sn2, 2000);
  List gets = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2);
  AsyncRequestFuture future = process.submitAll(DUMMY_TABLE, gets, null, new Object[2]);
  verifyReplicaResult(future, RR.FALSE, RR.TRUE);
  Assert.assertEquals(1, process.getReplicaCallCount());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * First submit mixes one failing put with two good ones (full retry cycle,
 * one recorded error); a second submit with a single good put then succeeds
 * in exactly one call on the same AsyncProcess.
 */
@Test
public void testFailAndSuccess() throws Exception {
  MyAsyncProcess process = new MyAsyncProcess(createHConnection(), conf, false);
  List batch = new ArrayList();
  batch.add(createPut(1, false));
  batch.add(createPut(1, true));
  batch.add(createPut(1, true));
  AsyncRequestFuture future = process.submit(DUMMY_TABLE, batch, false, null, true);
  Assert.assertTrue(batch.isEmpty()); // submit drains the input list
  future.waitUntilDone();
  verifyResult(future, false, true, true);
  Assert.assertEquals(NB_RETRIES + 1, process.callsCt.get());
  process.callsCt.set(0);
  Assert.assertEquals(1, future.getErrors().actions.size());

  // Round two: a single good put goes through on the first attempt.
  batch.add(createPut(1, true));
  process.waitUntilDone();
  future = process.submit(DUMMY_TABLE, batch, false, null, true);
  Assert.assertEquals(0, batch.size());
  future.waitUntilDone();
  Assert.assertEquals(1, process.callsCt.get());
  verifyResult(future, true);
}

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A hard IOException from the async process is retried the configured number
 * of times and then reported as RetriesExhaustedWithDetailsException.
 */
@Test
public void testGlobalErrors() throws IOException {
  ClusterConnection connection = new MyConnectionImpl(conf);
  BufferedMutatorImpl bufferedMutator =
      (BufferedMutatorImpl) connection.getBufferedMutator(DUMMY_TABLE);
  AsyncProcessWithFailure failingAp =
      new AsyncProcessWithFailure(connection, conf, new IOException("test"));
  bufferedMutator.ap = failingAp;
  Assert.assertNotNull(bufferedMutator.ap.createServerErrorTracker());
  Put put = createPut(1, true);
  bufferedMutator.mutate(put);
  try {
    bufferedMutator.flush();
    Assert.fail();
  } catch (RetriesExhaustedWithDetailsException expected) {
  }
  // One initial attempt plus NB_RETRIES retries.
  Assert.assertEquals(NB_RETRIES + 1, failingAp.callsCt.get());
}

InternalCallVerifier EqualityVerifier 
/**
 * All three gets are answered by their primaries (FALSE across the board)
 * and no replica call is ever made.
 */
@Test
public void testReplicaPrimarySuccessWoReplicaCalls() throws Exception {
  MyAsyncProcessWithReplicas process = createReplicaAp(1000, 10, 0);
  List gets = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2, DUMMY_BYTES_3);
  AsyncRequestFuture future = process.submitAll(DUMMY_TABLE, gets, null, new Object[3]);
  verifyReplicaResult(future, RR.FALSE, RR.FALSE, RR.FALSE);
  Assert.assertEquals(0, process.getReplicaCallCount());
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * With a zero-sized write buffer forcing immediate flushes, a failed put
 * must make the next mutate throw RetriesExhaustedException and must not
 * leave the new put sitting in the write buffer.
 */
@Test
public void testHTableFailedPutAndNewPut() throws Exception {
  ClusterConnection connection = createHConnection();
  BufferedMutatorImpl bufferedMutator = new BufferedMutatorImpl(
      connection, null, null, new BufferedMutatorParams(DUMMY_TABLE).writeBufferSize(0));
  MyAsyncProcess process = new MyAsyncProcess(connection, conf, true);
  bufferedMutator.ap = process;
  Put put = createPut(1, false); // this put fails
  bufferedMutator.mutate(put);
  process.waitUntilDone();
  put = createPut(1, true); // a good put submitted after the failure
  Assert.assertEquals(0, bufferedMutator.writeAsyncBuffer.size());
  try {
    bufferedMutator.mutate(put);
    Assert.fail();
  } catch (RetriesExhaustedException expected) {
  }
  Assert.assertEquals("the put should not been inserted.", 0,
      bufferedMutator.writeAsyncBuffer.size());
}

InternalCallVerifier EqualityVerifier 
/**
 * Mutating through a Mockito-mocked BufferedMutatorImpl leaves the reported
 * write buffer size unchanged (the mock's methods are stubs).
 */
@Test
public void testHTablePutSuccess() throws Exception {
  BufferedMutatorImpl mockedMutator = Mockito.mock(BufferedMutatorImpl.class);
  mockedMutator.ap = new MyAsyncProcess(createHConnection(), conf, true);
  Put put = createPut(1, true);
  Assert.assertEquals(0, mockedMutator.getWriteBufferSize());
  mockedMutator.mutate(put);
  Assert.assertEquals(0, mockedMutator.getWriteBufferSize());
}

Class: org.apache.hadoop.hbase.client.TestAttributes

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the attribute map on Delete: setting null for an absent key is
 * a no-op, setting null for an existing key removes it (idempotently), and
 * values can be overwritten in place.
 */
@Test
public void testDeleteAttributes() {
  Delete delete = new Delete(new byte[] { 'r' });

  // Fresh operation: no attributes, lookups return null.
  Assert.assertTrue(delete.getAttributesMap().isEmpty());
  Assert.assertNull(delete.getAttribute("absent"));

  // Setting null on a missing key must not create an entry.
  delete.setAttribute("absent", null);
  Assert.assertTrue(delete.getAttributesMap().isEmpty());
  Assert.assertNull(delete.getAttribute("absent"));

  // Add the first attribute.
  delete.setAttribute("attribute1", Bytes.toBytes("value1"));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), delete.getAttribute("attribute1")));
  Assert.assertEquals(1, delete.getAttributesMap().size());
  Assert.assertTrue(
      Arrays.equals(Bytes.toBytes("value1"), delete.getAttributesMap().get("attribute1")));

  // Overwrite it in place.
  delete.setAttribute("attribute1", Bytes.toBytes("value12"));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), delete.getAttribute("attribute1")));
  Assert.assertEquals(1, delete.getAttributesMap().size());
  Assert.assertTrue(
      Arrays.equals(Bytes.toBytes("value12"), delete.getAttributesMap().get("attribute1")));

  // Add a second attribute.
  delete.setAttribute("attribute2", Bytes.toBytes("value2"));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), delete.getAttribute("attribute2")));
  Assert.assertEquals(2, delete.getAttributesMap().size());
  Assert.assertTrue(
      Arrays.equals(Bytes.toBytes("value2"), delete.getAttributesMap().get("attribute2")));

  // Remove the second attribute via null; removing twice is harmless.
  delete.setAttribute("attribute2", null);
  Assert.assertNull(delete.getAttribute("attribute2"));
  Assert.assertEquals(1, delete.getAttributesMap().size());
  Assert.assertNull(delete.getAttributesMap().get("attribute2"));
  delete.setAttribute("attribute2", null);
  Assert.assertNull(delete.getAttribute("attribute2"));
  Assert.assertEquals(1, delete.getAttributesMap().size());
  Assert.assertNull(delete.getAttributesMap().get("attribute2"));

  // Remove the last attribute; the map must be empty again.
  delete.setAttribute("attribute1", null);
  Assert.assertNull(delete.getAttribute("attribute1"));
  Assert.assertTrue(delete.getAttributesMap().isEmpty());
  Assert.assertNull(delete.getAttributesMap().get("attribute1"));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Scan's map representation carries the id only after setId is called. */
@Test
public void testScanId() {
  Scan scan = new Scan();
  Assert.assertNull("Make sure id is null if unset", scan.toMap().get("id"));
  scan.setId("myId");
  Assert.assertEquals("myId", scan.toMap().get("id"));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Put's map representation carries the id only after setId is called. */
@Test
public void testPutId() {
  Put put = new Put(ROW);
  Assert.assertNull("Make sure id is null if unset", put.toMap().get("id"));
  put.setId("myId");
  Assert.assertEquals("myId", put.toMap().get("id"));
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the attribute map on Put: setting null for an absent key is a
 * no-op, setting null for an existing key removes it (idempotently), and
 * values can be overwritten in place.
 */
@Test
public void testPutAttributes() {
  Put put = new Put(ROW);

  // Fresh operation: no attributes, lookups return null.
  Assert.assertTrue(put.getAttributesMap().isEmpty());
  Assert.assertNull(put.getAttribute("absent"));

  // Setting null on a missing key must not create an entry.
  put.setAttribute("absent", null);
  Assert.assertTrue(put.getAttributesMap().isEmpty());
  Assert.assertNull(put.getAttribute("absent"));

  // Add the first attribute.
  put.setAttribute("attribute1", Bytes.toBytes("value1"));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttribute("attribute1")));
  Assert.assertEquals(1, put.getAttributesMap().size());
  Assert.assertTrue(
      Arrays.equals(Bytes.toBytes("value1"), put.getAttributesMap().get("attribute1")));

  // Overwrite it in place.
  put.setAttribute("attribute1", Bytes.toBytes("value12"));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttribute("attribute1")));
  Assert.assertEquals(1, put.getAttributesMap().size());
  Assert.assertTrue(
      Arrays.equals(Bytes.toBytes("value12"), put.getAttributesMap().get("attribute1")));

  // Add a second attribute.
  put.setAttribute("attribute2", Bytes.toBytes("value2"));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttribute("attribute2")));
  Assert.assertEquals(2, put.getAttributesMap().size());
  Assert.assertTrue(
      Arrays.equals(Bytes.toBytes("value2"), put.getAttributesMap().get("attribute2")));

  // Remove the second attribute via null; removing twice is harmless.
  put.setAttribute("attribute2", null);
  Assert.assertNull(put.getAttribute("attribute2"));
  Assert.assertEquals(1, put.getAttributesMap().size());
  Assert.assertNull(put.getAttributesMap().get("attribute2"));
  put.setAttribute("attribute2", null);
  Assert.assertNull(put.getAttribute("attribute2"));
  Assert.assertEquals(1, put.getAttributesMap().size());
  Assert.assertNull(put.getAttributesMap().get("attribute2"));

  // Remove the last attribute; the map must be empty again.
  put.setAttribute("attribute1", null);
  Assert.assertNull(put.getAttribute("attribute1"));
  Assert.assertTrue(put.getAttributesMap().isEmpty());
  Assert.assertNull(put.getAttributesMap().get("attribute1"));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Get's map representation carries the id only after setId is called. */
@Test
public void testGetId() {
  Get get = new Get(ROW);
  Assert.assertNull("Make sure id is null if unset", get.toMap().get("id"));
  get.setId("myId");
  Assert.assertEquals("myId", get.toMap().get("id"));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Delete's map representation carries the id only after setId is called. */
@Test
public void testDeleteId() {
  Delete delete = new Delete(ROW);
  Assert.assertNull("Make sure id is null if unset", delete.toMap().get("id"));
  delete.setId("myId");
  Assert.assertEquals("myId", delete.toMap().get("id"));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Append's map representation carries the id only after setId is called. */
@Test
public void testAppendId() {
  Append append = new Append(ROW);
  Assert.assertNull("Make sure id is null if unset", append.toMap().get("id"));
  append.setId("myId");
  Assert.assertEquals("myId", append.toMap().get("id"));
}

Class: org.apache.hadoop.hbase.client.TestBlockEvictionFromClient

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Multi-get path: while concurrent MultiGetThreads are parked inside the
 * region observer (waitForGets), every cached block they pin must report a
 * ref count of exactly NO_OF_THREADS; the threads are then released via the
 * observer's CountDownLatch and joined, and the cache is re-checked.
 *
 * NOTE(review): after the while-loop the local `iterator` is already
 * exhausted, yet it is passed to iterateBlockCache without being re-created
 * — confirm that helper re-obtains an iterator or that this is intentional.
 * NOTE(review): waitForGets is set to true again after the gets complete;
 * presumably a reset to false was intended — verify against the observer.
 */
@Test public void testMultiGets() throws IOException, InterruptedException { HTable table=null; try { latch=new CountDownLatch(2); getLatch=new CountDownLatch(1); TableName tableName=TableName.valueOf("testMultiGets"); table=TEST_UTIL.createTable(tableName,FAMILIES_1,1,1024,CustomInnerRegionObserver.class.getName()); RegionLocator locator=table.getRegionLocator(); String regionName=locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region=TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); Store store=region.getStores().iterator().next(); CacheConfig cacheConf=store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); cacheConf.setEvictOnClose(true); BlockCache cache=cacheConf.getBlockCache(); Put put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); region.flush(true); put=new Put(ROW1); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); region.flush(true); byte[] QUALIFIER2=Bytes.add(QUALIFIER,QUALIFIER); put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER2,data2); table.put(put); region.flush(true); System.out.println("Flushing cache"); CustomInnerRegionObserver.waitForGets.set(true); MultiGetThread[] getThreads=initiateMultiGet(table); Thread.sleep(200); int refCount; Iterator iterator=cache.iterator(); boolean foundNonZeroBlock=false; while (iterator.hasNext()) { CachedBlock next=iterator.next(); BlockCacheKey cacheKey=new BlockCacheKey(next.getFilename(),next.getOffset()); if (cache instanceof BucketCache) { refCount=((BucketCache)cache).getRefCount(cacheKey); } else if (cache instanceof CombinedBlockCache) { refCount=((CombinedBlockCache)cache).getRefCount(cacheKey); } else { continue; } if (refCount != 0) { assertEquals(NO_OF_THREADS,refCount); foundNonZeroBlock=true; } } assertTrue("Should have found nonzero ref count block",foundNonZeroBlock); CustomInnerRegionObserver.getCdl().get().countDown(); CustomInnerRegionObserver.getCdl().get().countDown(); for ( MultiGetThread thread 
: getThreads) { thread.join(); } CustomInnerRegionObserver.waitForGets.set(true); iterateBlockCache(cache,iterator); getLatch.countDown(); System.out.println("Gets should have returned the bloks"); } finally { if (table != null) { table.close(); } } }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * HBASE-13082 scenario with compaction and flush: while scanner threads are
 * parked in the observer, blocks they pin must report ref count
 * NO_OF_THREADS both before and after a major compaction collapses three
 * store files into one; once compactionLatch and latch are released and the
 * threads join, the cache must hold no referenced blocks and the row must
 * still be fully readable (both qualifiers intact).
 */
@Test public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush() throws IOException, InterruptedException { HTable table=null; try { latch=new CountDownLatch(1); compactionLatch=new CountDownLatch(1); TableName tableName=TableName.valueOf("testBlockEvictionAfterHBASE13082WithCompactionAndFlush"); table=TEST_UTIL.createTable(tableName,FAMILIES_1,1,1024,CustomInnerRegionObserverWrapper.class.getName()); RegionLocator locator=table.getRegionLocator(); String regionName=locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region=TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); Store store=region.getStores().iterator().next(); CacheConfig cacheConf=store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); cacheConf.setEvictOnClose(true); BlockCache cache=cacheConf.getBlockCache(); Put put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); put=new Put(ROW1); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); assertTrue(Bytes.equals(table.get(new Get(ROW)).value(),data)); region.flush(true); int refCount=0; byte[] QUALIFIER2=Bytes.add(QUALIFIER,QUALIFIER); byte[] data2=Bytes.add(data,data); put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER2,data2); table.put(put); System.out.println("Flushing cache"); region.flush(true); Iterator iterator=cache.iterator(); iterateBlockCache(cache,iterator); ScanThread[] scanThreads=initiateScan(table,false); Thread.sleep(100); iterator=cache.iterator(); boolean usedBlocksFound=false; while (iterator.hasNext()) { CachedBlock next=iterator.next(); BlockCacheKey cacheKey=new BlockCacheKey(next.getFilename(),next.getOffset()); if (cache instanceof BucketCache) { refCount=((BucketCache)cache).getRefCount(cacheKey); } else if (cache instanceof CombinedBlockCache) { refCount=((CombinedBlockCache)cache).getRefCount(cacheKey); } else { continue; } if (refCount != 0) { assertEquals(NO_OF_THREADS,refCount); usedBlocksFound=true; } } 
QUALIFIER2=Bytes.add(QUALIFIER,QUALIFIER); data2=Bytes.add(data,data); put=new Put(ROW1); put.addColumn(FAMILY,QUALIFIER2,data2); table.put(put); System.out.println("Flushing cache"); region.flush(true); assertTrue("Blocks with non zero ref count should be found ",usedBlocksFound); usedBlocksFound=false; System.out.println("Compacting"); assertEquals(3,store.getStorefilesCount()); store.triggerMajorCompaction(); region.compact(true); waitForStoreFileCount(store,1,10000); assertEquals(1,store.getStorefilesCount()); iterator=cache.iterator(); while (iterator.hasNext()) { CachedBlock next=iterator.next(); BlockCacheKey cacheKey=new BlockCacheKey(next.getFilename(),next.getOffset()); if (cache instanceof BucketCache) { refCount=((BucketCache)cache).getRefCount(cacheKey); } else if (cache instanceof CombinedBlockCache) { refCount=((CombinedBlockCache)cache).getRefCount(cacheKey); } else { continue; } if (refCount != 0) { assertEquals(NO_OF_THREADS,refCount); usedBlocksFound=true; } } assertTrue("Blocks with non zero ref count should be found ",usedBlocksFound); compactionLatch.countDown(); latch.countDown(); for ( ScanThread thread : scanThreads) { thread.join(); } iterator=cache.iterator(); iterateBlockCache(cache,iterator); Result r=table.get(new Get(ROW)); assertTrue(Bytes.equals(r.getValue(FAMILY,QUALIFIER),data)); assertTrue(Bytes.equals(r.getValue(FAMILY,QUALIFIER2),data2)); iterator=cache.iterator(); iterateBlockCache(cache,iterator); } finally { if (table != null) { table.close(); } } }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Gets with an explicit column tracker across many qualifiers spread over
 * several store files: while the get threads are parked in the observer,
 * exactly 10 cached blocks must carry ref count NO_OF_THREADS; after the
 * latch is released and the threads join, checkForBlockEviction verifies
 * the blocks are released.
 *
 * NOTE(review): waitForGets is set to true again after the gets complete;
 * presumably a reset to false was intended — verify against the observer.
 */
@Test public void testGetsWithMultiColumnsAndExplicitTracker() throws IOException, InterruptedException { HTable table=null; try { latch=new CountDownLatch(1); getLatch=new CountDownLatch(1); TableName tableName=TableName.valueOf("testGetsWithMultiColumnsAndExplicitTracker"); table=TEST_UTIL.createTable(tableName,FAMILIES_1,1,1024,CustomInnerRegionObserver.class.getName()); RegionLocator locator=table.getRegionLocator(); String regionName=locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region=TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); BlockCache cache=setCacheProperties(region); Put put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); region.flush(true); put=new Put(ROW1); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); region.flush(true); for (int i=1; i < 10; i++) { put=new Put(ROW); put.addColumn(FAMILY,Bytes.toBytes("testQualifier" + i),data2); table.put(put); if (i % 2 == 0) { region.flush(true); } } byte[] QUALIFIER2=Bytes.add(QUALIFIER,QUALIFIER); put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER2,data2); table.put(put); region.flush(true); System.out.println("Flushing cache"); CustomInnerRegionObserver.waitForGets.set(true); GetThread[] getThreads=initiateGet(table,true,false); Thread.sleep(200); Iterator iterator=cache.iterator(); boolean usedBlocksFound=false; int refCount=0; int noOfBlocksWithRef=0; while (iterator.hasNext()) { CachedBlock next=iterator.next(); BlockCacheKey cacheKey=new BlockCacheKey(next.getFilename(),next.getOffset()); if (cache instanceof BucketCache) { refCount=((BucketCache)cache).getRefCount(cacheKey); } else if (cache instanceof CombinedBlockCache) { refCount=((CombinedBlockCache)cache).getRefCount(cacheKey); } else { continue; } if (refCount != 0) { System.out.println("The refCount is " + refCount); assertEquals(NO_OF_THREADS,refCount); usedBlocksFound=true; noOfBlocksWithRef++; } } assertTrue(usedBlocksFound); 
assertEquals(10,noOfBlocksWithRef); CustomInnerRegionObserver.getCdl().get().countDown(); for ( GetThread thread : getThreads) { thread.join(); } CustomInnerRegionObserver.waitForGets.set(true); checkForBlockEviction(cache,true,false,false); getLatch.countDown(); System.out.println("Gets should have returned the bloks"); } finally { if (table != null) { table.close(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Parallel scans with cache-on-write and evict-on-close enabled: checks
 * block ref counts while the scans run, then verifies that subsequent
 * reads, a flush, and a major compaction (2 -> 1 store files) leave the
 * cache with no pinned blocks while the row stays fully readable.
 */
@Test public void testBlockEvictionWithParallelScans() throws Exception { HTable table=null; try { latch=new CountDownLatch(1); TableName tableName=TableName.valueOf("testBlockEvictionWithParallelScans"); table=TEST_UTIL.createTable(tableName,FAMILIES_1,1,1024,CustomInnerRegionObserver.class.getName()); RegionLocator locator=table.getRegionLocator(); String regionName=locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region=TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); Store store=region.getStores().iterator().next(); CacheConfig cacheConf=store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); cacheConf.setEvictOnClose(true); BlockCache cache=cacheConf.getBlockCache(); Put put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); put=new Put(ROW1); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); assertTrue(Bytes.equals(table.get(new Get(ROW)).value(),data)); System.out.println("Flushing cache in problematic area"); region.flush(true); ScanThread[] scanThreads=initiateScan(table,false); Thread.sleep(100); checkForBlockEviction(cache,false,false,false); for ( ScanThread thread : scanThreads) { thread.join(); } Iterator iterator=cache.iterator(); iterateBlockCache(cache,iterator); assertTrue(Bytes.equals(table.get(new Get(ROW)).value(),data)); iterator=cache.iterator(); iterateBlockCache(cache,iterator); byte[] QUALIFIER2=Bytes.add(QUALIFIER,QUALIFIER); byte[] data2=Bytes.add(data,data); put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER2,data2); table.put(put); Result r=table.get(new Get(ROW)); assertTrue(Bytes.equals(r.getValue(FAMILY,QUALIFIER),data)); assertTrue(Bytes.equals(r.getValue(FAMILY,QUALIFIER2),data2)); iterator=cache.iterator(); iterateBlockCache(cache,iterator); System.out.println("Flushing cache"); region.flush(true); iterator=cache.iterator(); iterateBlockCache(cache,iterator); System.out.println("Compacting"); assertEquals(2,store.getStorefilesCount()); 
store.triggerMajorCompaction(); region.compact(true); waitForStoreFileCount(store,1,10000); assertEquals(1,store.getStorefilesCount()); iterator=cache.iterator(); iterateBlockCache(cache,iterator); r=table.get(new Get(ROW)); assertTrue(Bytes.equals(r.getValue(FAMILY,QUALIFIER),data)); assertTrue(Bytes.equals(r.getValue(FAMILY,QUALIFIER2),data2)); iterator=cache.iterator(); iterateBlockCache(cache,iterator); } finally { if (table != null) { table.close(); } } }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Scan over ten column families spread across several flushes: while the
 * scan threads are parked in the observer, exactly 12 cached blocks must
 * show ref count NO_OF_THREADS; after the latch is released and the threads
 * join, checkForBlockEviction verifies the blocks are released.
 */
@Test public void testScanWithMultipleColumnFamilies() throws IOException, InterruptedException { HTable table=null; try { latch=new CountDownLatch(1); TableName tableName=TableName.valueOf("testScanWithMultipleColumnFamilies"); byte[][] fams=new byte[10][]; fams[0]=FAMILY; for (int i=1; i < 10; i++) { fams[i]=(Bytes.toBytes("testFamily" + i)); } table=TEST_UTIL.createTable(tableName,fams,1,1024,CustomInnerRegionObserver.class.getName()); RegionLocator locator=table.getRegionLocator(); String regionName=locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region=TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); BlockCache cache=setCacheProperties(region); Put put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); region.flush(true); put=new Put(ROW1); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); region.flush(true); for (int i=1; i < 10; i++) { put=new Put(ROW); put.addColumn(Bytes.toBytes("testFamily" + i),Bytes.toBytes("testQualifier" + i),data2); table.put(put); if (i % 2 == 0) { region.flush(true); } } region.flush(true); byte[] QUALIFIER2=Bytes.add(QUALIFIER,QUALIFIER); put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER2,data2); table.put(put); region.flush(true); System.out.println("Flushing cache"); ScanThread[] scanThreads=initiateScan(table,true); Thread.sleep(200); Iterator iterator=cache.iterator(); boolean usedBlocksFound=false; int refCount=0; int noOfBlocksWithRef=0; while (iterator.hasNext()) { CachedBlock next=iterator.next(); BlockCacheKey cacheKey=new BlockCacheKey(next.getFilename(),next.getOffset()); if (cache instanceof BucketCache) { refCount=((BucketCache)cache).getRefCount(cacheKey); } else if (cache instanceof CombinedBlockCache) { refCount=((CombinedBlockCache)cache).getRefCount(cacheKey); } else { continue; } if (refCount != 0) { System.out.println("The refCount is " + refCount); assertEquals(NO_OF_THREADS,refCount); usedBlocksFound=true; 
noOfBlocksWithRef++; } } assertTrue(usedBlocksFound); assertEquals(12,noOfBlocksWithRef); CustomInnerRegionObserver.getCdl().get().countDown(); for ( ScanThread thread : scanThreads) { thread.join(); } checkForBlockEviction(cache,true,false,false); } finally { if (table != null) { table.close(); } } }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Get over ten column families: while the get threads are parked in the
 * observer, exactly 3 cached blocks must show ref count NO_OF_THREADS;
 * after the latch is released and the threads join, checkForBlockEviction
 * verifies the blocks are released.
 *
 * NOTE(review): waitForGets is set to true again after the gets complete;
 * presumably a reset to false was intended — verify against the observer.
 */
@Test public void testGetWithMultipleColumnFamilies() throws IOException, InterruptedException { HTable table=null; try { latch=new CountDownLatch(1); getLatch=new CountDownLatch(1); TableName tableName=TableName.valueOf("testGetWithMultipleColumnFamilies"); byte[][] fams=new byte[10][]; fams[0]=FAMILY; for (int i=1; i < 10; i++) { fams[i]=(Bytes.toBytes("testFamily" + i)); } table=TEST_UTIL.createTable(tableName,fams,1,1024,CustomInnerRegionObserver.class.getName()); RegionLocator locator=table.getRegionLocator(); String regionName=locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region=TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); BlockCache cache=setCacheProperties(region); Put put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); region.flush(true); put=new Put(ROW1); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); region.flush(true); for (int i=1; i < 10; i++) { put=new Put(ROW); put.addColumn(Bytes.toBytes("testFamily" + i),Bytes.toBytes("testQualifier" + i),data2); table.put(put); if (i % 2 == 0) { region.flush(true); } } region.flush(true); byte[] QUALIFIER2=Bytes.add(QUALIFIER,QUALIFIER); put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER2,data2); table.put(put); region.flush(true); System.out.println("Flushing cache"); CustomInnerRegionObserver.waitForGets.set(true); GetThread[] getThreads=initiateGet(table,true,true); Thread.sleep(200); Iterator iterator=cache.iterator(); boolean usedBlocksFound=false; int refCount=0; int noOfBlocksWithRef=0; while (iterator.hasNext()) { CachedBlock next=iterator.next(); BlockCacheKey cacheKey=new BlockCacheKey(next.getFilename(),next.getOffset()); if (cache instanceof BucketCache) { refCount=((BucketCache)cache).getRefCount(cacheKey); } else if (cache instanceof CombinedBlockCache) { refCount=((CombinedBlockCache)cache).getRefCount(cacheKey); } else { continue; } if (refCount != 0) { System.out.println("The refCount is " + 
refCount); assertEquals(NO_OF_THREADS,refCount); usedBlocksFound=true; noOfBlocksWithRef++; } } assertTrue(usedBlocksFound); assertEquals(3,noOfBlocksWithRef); CustomInnerRegionObserver.getCdl().get().countDown(); for ( GetThread thread : getThreads) { thread.join(); } CustomInnerRegionObserver.waitForGets.set(true); checkForBlockEviction(cache,true,false,false); getLatch.countDown(); System.out.println("Gets should have returned the bloks"); } finally { if (table != null) { table.close(); } } }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Scan that throws inside the observer: pinned blocks must keep ref count
 * NO_OF_THREADS both while the exception path is active and immediately
 * after the scan threads join; after a further wait (&gt;5s) every block's
 * ref count must have dropped back to zero.
 */
@Test public void testScanWithException() throws IOException, InterruptedException { HTable table=null; try { latch=new CountDownLatch(1); exceptionLatch=new CountDownLatch(1); TableName tableName=TableName.valueOf("testScanWithException"); table=TEST_UTIL.createTable(tableName,FAMILIES_1,1,1024,CustomInnerRegionObserverWrapper.class.getName()); RegionLocator locator=table.getRegionLocator(); String regionName=locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region=TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); Store store=region.getStores().iterator().next(); CacheConfig cacheConf=store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); cacheConf.setEvictOnClose(true); BlockCache cache=cacheConf.getBlockCache(); insertData(table); System.out.println("Flushing cache"); region.flush(true); CustomInnerRegionObserver.throwException.set(true); ScanThread[] scanThreads=initiateScan(table,false); Thread.sleep(100); Iterator iterator=cache.iterator(); boolean usedBlocksFound=false; int refCount=0; while (iterator.hasNext()) { CachedBlock next=iterator.next(); BlockCacheKey cacheKey=new BlockCacheKey(next.getFilename(),next.getOffset()); if (cache instanceof BucketCache) { refCount=((BucketCache)cache).getRefCount(cacheKey); } else if (cache instanceof CombinedBlockCache) { refCount=((CombinedBlockCache)cache).getRefCount(cacheKey); } else { continue; } if (refCount != 0) { assertEquals(NO_OF_THREADS,refCount); usedBlocksFound=true; } } assertTrue(usedBlocksFound); exceptionLatch.countDown(); CustomInnerRegionObserver.getCdl().get().countDown(); for ( ScanThread thread : scanThreads) { thread.join(); } iterator=cache.iterator(); usedBlocksFound=false; refCount=0; while (iterator.hasNext()) { CachedBlock next=iterator.next(); BlockCacheKey cacheKey=new BlockCacheKey(next.getFilename(),next.getOffset()); if (cache instanceof BucketCache) { refCount=((BucketCache)cache).getRefCount(cacheKey); } else if 
(cache instanceof CombinedBlockCache) { refCount=((CombinedBlockCache)cache).getRefCount(cacheKey); } else { continue; } if (refCount != 0) { assertEquals(NO_OF_THREADS,refCount); usedBlocksFound=true; } } assertTrue(usedBlocksFound); Thread.sleep(5100); iterator=cache.iterator(); refCount=0; while (iterator.hasNext()) { CachedBlock next=iterator.next(); BlockCacheKey cacheKey=new BlockCacheKey(next.getFilename(),next.getOffset()); if (cache instanceof BucketCache) { refCount=((BucketCache)cache).getRefCount(cacheKey); } else if (cache instanceof CombinedBlockCache) { refCount=((CombinedBlockCache)cache).getRefCount(cacheKey); } else { continue; } assertEquals(0,refCount); } } finally { if (table != null) { table.close(); } } }

Class: org.apache.hadoop.hbase.client.TestCheckAndMutate

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Exercises {@code Table.checkAndMutate}: a matching guard on column A applies a
 * RowMutations (re-put A/B, delete C) atomically, and a mutation against an unknown
 * column family is rejected with NoSuchColumnFamilyException.
 * <p>
 * Column values are checked with assertEquals (instead of assertTrue on
 * {@code String.equals}) so a missing column reports a comparison failure rather
 * than an NPE from {@code Bytes.toString(null).equals(...)}.
 */
@Test
public void testCheckAndMutate() throws Exception {
  final TableName tableName = TableName.valueOf("TestPutWithDelete");
  final byte[] rowKey = Bytes.toBytes("12345");
  final byte[] family = Bytes.toBytes("cf");
  Table table = TEST_UTIL.createTable(tableName, family);
  TEST_UTIL.waitTableAvailable(tableName.getName(), 5000);
  try {
    // Seed the row with three columns.
    Put put = new Put(rowKey);
    put.addColumn(family, Bytes.toBytes("A"), Bytes.toBytes("a"));
    put.addColumn(family, Bytes.toBytes("B"), Bytes.toBytes("b"));
    put.addColumn(family, Bytes.toBytes("C"), Bytes.toBytes("c"));
    table.put(put);
    Get get = new Get(rowKey);
    Result result = table.get(get);
    assertEquals("Column A value should be a", "a",
        Bytes.toString(result.getValue(family, Bytes.toBytes("A"))));
    assertEquals("Column B value should be b", "b",
        Bytes.toString(result.getValue(family, Bytes.toBytes("B"))));
    assertEquals("Column C value should be c", "c",
        Bytes.toString(result.getValue(family, Bytes.toBytes("C"))));
    // Atomically re-put A and B and delete C, guarded on A == "a".
    RowMutations rm = new RowMutations(rowKey);
    put = new Put(rowKey);
    put.addColumn(family, Bytes.toBytes("A"), Bytes.toBytes("a"));
    put.addColumn(family, Bytes.toBytes("B"), Bytes.toBytes("b"));
    rm.add(put);
    Delete del = new Delete(rowKey);
    del.addColumn(family, Bytes.toBytes("C"));
    rm.add(del);
    boolean res = table.checkAndMutate(rowKey, family, Bytes.toBytes("A"),
        CompareFilter.CompareOp.EQUAL, Bytes.toBytes("a"), rm);
    assertTrue(res);
    get = new Get(rowKey);
    result = table.get(get);
    assertEquals("Column A value should be a", "a",
        Bytes.toString(result.getValue(family, Bytes.toBytes("A"))));
    assertEquals("Column B value should be b", "b",
        Bytes.toString(result.getValue(family, Bytes.toBytes("B"))));
    assertTrue("Column C should not exist",
        result.getValue(family, Bytes.toBytes("C")) == null);
    // A mutation referencing a bogus column family must be rejected.
    try {
      Put p = new Put(rowKey);
      byte[] value = new byte[0];
      p.addColumn(new byte[] { 'b', 'o', 'g', 'u', 's' }, new byte[] { 'A' }, value);
      rm = new RowMutations(rowKey);
      rm.add(p);
      table.checkAndMutate(rowKey, family, Bytes.toBytes("A"), CompareFilter.CompareOp.EQUAL,
          Bytes.toBytes("a"), rm);
      fail("Expected NoSuchColumnFamilyException");
    } catch (NoSuchColumnFamilyException e) {
      // expected
    }
  } finally {
    table.close();
  }
}

Class: org.apache.hadoop.hbase.client.TestClientExponentialBackoff

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testMaxLoad(){ Configuration conf=new Configuration(false); ExponentialClientBackoffPolicy backoff=new ExponentialClientBackoffPolicy(conf); ServerStatistics stats=new ServerStatistics(); update(stats,100); assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF,backoff.getBackoffTime(server,regionname,stats)); long max=100; conf.setLong(ExponentialClientBackoffPolicy.MAX_BACKOFF_KEY,max); ExponentialClientBackoffPolicy backoffShortTimeout=new ExponentialClientBackoffPolicy(conf); assertEquals(max,backoffShortTimeout.getBackoffTime(server,regionname,stats)); update(stats,101); assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF,backoff.getBackoffTime(server,regionname,stats)); assertEquals(max,backoffShortTimeout.getBackoffTime(server,regionname,stats)); update(stats,99); assertTrue(backoff.getBackoffTime(server,regionname,stats) < ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); assertTrue(backoffShortTimeout.getBackoffTime(server,regionname,stats) < max); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testHeapOccupancyPolicy(){ Configuration conf=new Configuration(false); ExponentialClientBackoffPolicy backoff=new ExponentialClientBackoffPolicy(conf); ServerStatistics stats=new ServerStatistics(); long backoffTime; update(stats,0,95,0); backoffTime=backoff.getBackoffTime(server,regionname,stats); assertTrue("Heap occupancy at low watermark had no effect",backoffTime > 0); long previous=backoffTime; update(stats,0,96,0); backoffTime=backoff.getBackoffTime(server,regionname,stats); assertTrue("Increase above low watermark should have increased backoff",backoffTime > previous); update(stats,0,98,0); backoffTime=backoff.getBackoffTime(server,regionname,stats); assertEquals("We should be using max backoff when at high watermark",backoffTime,ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); }

InternalCallVerifier EqualityVerifier 
@Test public void testNulls(){ Configuration conf=new Configuration(false); ExponentialClientBackoffPolicy backoff=new ExponentialClientBackoffPolicy(conf); assertEquals(0,backoff.getBackoffTime(null,null,null)); assertEquals(0,backoff.getBackoffTime(server,null,null)); assertEquals(0,backoff.getBackoffTime(server,regionname,null)); ServerStatistics stats=new ServerStatistics(); assertEquals(0,backoff.getBackoffTime(server,regionname,stats)); }

IterativeVerifier InternalCallVerifier BooleanVerifier 
/** * Make sure that we get results in the order that we expect - backoff for a load of 1 should * less than backoff for 10, which should be less than that for 50. */ @Test public void testResultOrdering(){ Configuration conf=new Configuration(false); conf.setLong(ExponentialClientBackoffPolicy.MAX_BACKOFF_KEY,Integer.MAX_VALUE); ExponentialClientBackoffPolicy backoff=new ExponentialClientBackoffPolicy(conf); ServerStatistics stats=new ServerStatistics(); long previous=backoff.getBackoffTime(server,regionname,stats); for (int i=1; i <= 100; i++) { update(stats,i); long next=backoff.getBackoffTime(server,regionname,stats); assertTrue("Previous backoff time" + previous + " >= "+ next+ ", the next backoff time for "+ "load "+ i,previous < next); previous=next; } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testCompactionPressurePolicy(){ Configuration conf=new Configuration(false); ExponentialClientBackoffPolicy backoff=new ExponentialClientBackoffPolicy(conf); ServerStatistics stats=new ServerStatistics(); long backoffTime; update(stats,0,0,0); backoffTime=backoff.getBackoffTime(server,regionname,stats); assertTrue("Compaction pressure has no effect",backoffTime == 0); long previous=backoffTime; update(stats,0,0,50); backoffTime=backoff.getBackoffTime(server,regionname,stats); assertTrue("Compaction pressure should be bigger",backoffTime > previous); update(stats,0,0,100); backoffTime=backoff.getBackoffTime(server,regionname,stats); assertEquals("under heavy compaction pressure",backoffTime,ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); }

Class: org.apache.hadoop.hbase.client.TestClientOperationInterrupt

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test public void testInterrupt50Percent() throws IOException, InterruptedException { final AtomicInteger noEx=new AtomicInteger(0); final AtomicInteger badEx=new AtomicInteger(0); final AtomicInteger noInt=new AtomicInteger(0); final AtomicInteger done=new AtomicInteger(0); List threads=new ArrayList(); final int nbThread=100; for (int i=0; i < nbThread; i++) { Thread t=new Thread(){ @Override public void run(){ try { Table ht=util.getConnection().getTable(tableName); Result r=ht.get(new Get(row1)); noEx.incrementAndGet(); } catch ( IOException e) { LOG.info("exception",e); if (!(e instanceof InterruptedIOException) || (e instanceof SocketTimeoutException)) { badEx.incrementAndGet(); } else { if (Thread.currentThread().isInterrupted()) { noInt.incrementAndGet(); LOG.info("The thread should NOT be with the 'interrupt' status."); } } } finally { done.incrementAndGet(); } } } ; t.setName("TestClientOperationInterrupt #" + i); threads.add(t); t.start(); } for (int i=0; i < nbThread / 2; i++) { threads.get(i).interrupt(); } boolean stillAlive=true; while (stillAlive) { stillAlive=false; for ( Thread t : threads) { if (t.isAlive()) { stillAlive=true; } } Threads.sleep(10); } Assert.assertFalse(Thread.currentThread().isInterrupted()); Assert.assertTrue(" noEx: " + noEx.get() + ", badEx="+ badEx.get()+ ", noInt="+ noInt.get(),noEx.get() == nbThread / 2 && badEx.get() == 0); while (done.get() != nbThread) { Thread.sleep(1); } Table ht=util.getConnection().getTable(tableName); Result r=ht.get(new Get(row1)); Assert.assertFalse(r.isEmpty()); }

Class: org.apache.hadoop.hbase.client.TestClientPushback

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a client connection picks up server pushback signals: after a write,
 * the tracked region stats reflect the memstore load, the backoff policy produces a
 * non-zero backoff from those stats, the connection metrics record the delayed
 * runner, and a delayed submission actually waits at least the computed backoff.
 */
@Test(timeout = 60000)
public void testClientTracksServerPushback() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf);
  HTable table = (HTable) conn.getTable(tableName);
  HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0);
  Region region = rs.getOnlineRegions(tableName).get(0);
  LOG.debug("Writing some data to " + tableName);
  Put p = new Put(Bytes.toBytes("row"));
  p.addColumn(family, qualifier, Bytes.toBytes("value1"));
  table.put(p);
  // Memstore load as a percentage of the configured flush size.
  int load = (int) ((((HRegion) region).addAndGetGlobalMemstoreSize(0) * 100) / flushSizeBytes);
  LOG.debug("Done writing some data to " + tableName);
  ClientBackoffPolicy backoffPolicy = conn.getBackoffPolicy();
  assertTrue("Backoff policy is not correctly configured",
      backoffPolicy instanceof ExponentialClientBackoffPolicy);
  ServerStatisticTracker stats = conn.getStatisticsTracker();
  assertNotNull("No stats configured for the client!", stats);
  ServerName server = rs.getServerName();
  byte[] regionName = region.getRegionInfo().getRegionName();
  ServerStatistics serverStats = stats.getServerStatsForTesting(server);
  ServerStatistics.RegionStatistics regionStats = serverStats.getStatsForRegion(regionName);
  assertEquals("We did not find some load on the memstore", load,
      regionStats.getMemstoreLoadPercent());
  long backoffTime = backoffPolicy.getBackoffTime(server, regionName, serverStats);
  assertNotEquals("Reported load does not produce a backoff", backoffTime, 0);
  LOG.debug("Backoff calculated for " + region.getRegionInfo().getRegionNameAsString() + " @ "
      + server + " is " + backoffTime);
  // Submit a second mutation; the async process should delay it by the backoff.
  // Typed: the raw Batch.Callback in the original left its abstract method
  // (erased to update(byte[], byte[], Object)) unimplemented — a compile error.
  List<Row> ops = new ArrayList<>(1);
  ops.add(p);
  final CountDownLatch latch = new CountDownLatch(1);
  final AtomicLong endTime = new AtomicLong();
  long startTime = EnvironmentEdgeManager.currentTime();
  table.mutator.ap.submit(tableName, ops, true, new Batch.Callback<Result>() {
    @Override
    public void update(byte[] region, byte[] row, Result result) {
      endTime.set(EnvironmentEdgeManager.currentTime());
      latch.countDown();
    }
  }, true);
  // The connection metrics must have recorded the region stats and the delayed runner.
  String name = server.getServerName() + "," + Bytes.toStringBinary(regionName);
  MetricsConnection.RegionStats rsStats =
      conn.getConnectionMetrics().serverStats.get(server).get(regionName);
  assertEquals(name, rsStats.name);
  assertEquals(rsStats.heapOccupancyHist.getSnapshot().getMean(),
      (double) regionStats.getHeapOccupancyPercent(), 0.1);
  assertEquals(rsStats.memstoreLoadHist.getSnapshot().getMean(),
      (double) regionStats.getMemstoreLoadPercent(), 0.1);
  MetricsConnection.RunnerStats runnerStats = conn.getConnectionMetrics().runnerStats;
  assertEquals(runnerStats.delayRunners.getCount(), 1);
  assertEquals(runnerStats.normalRunners.getCount(), 1);
  assertEquals("", runnerStats.delayIntevalHist.getSnapshot().getMean(), (double) backoffTime,
      0.1);
  latch.await(backoffTime * 2, TimeUnit.MILLISECONDS);
  assertNotEquals("AsyncProcess did not submit the work time", endTime.get(), 0);
  assertTrue("AsyncProcess did not delay long enough", endTime.get() - startTime >= backoffTime);
}

Class: org.apache.hadoop.hbase.client.TestClientScanner

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test @SuppressWarnings("unchecked") public void testSizeLimit() throws IOException { final Result[] results=new Result[1]; KeyValue kv1=new KeyValue("row".getBytes(),"cf".getBytes(),"cq".getBytes(),1,Type.Maximum); results[0]=Result.create(new Cell[]{kv1}); RpcRetryingCaller caller=Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory.newCaller()).thenReturn(caller); Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class),Mockito.anyInt())).thenAnswer(new Answer(){ private int count=0; @Override public Result[] answer( InvocationOnMock invocation) throws Throwable { ScannerCallableWithReplicas callable=invocation.getArgumentAt(0,ScannerCallableWithReplicas.class); switch (count) { case 0: case 2: count++; return null; case 1: count++; callable.setHasMoreResultsContext(true); callable.setServerHasMoreResults(false); return results; default : throw new RuntimeException("Expected only 2 invocations"); } } } ); Mockito.when(rpcFactory.newCaller()).thenReturn(caller); scan.setCaching(100); scan.setMaxResultSize(1); try (MockClientScanner scanner=new MockClientScanner(conf,scan,TableName.valueOf("table"),clusterConn,rpcFactory,controllerFactory,pool,Integer.MAX_VALUE)){ Mockito.verify(caller).callWithoutRetries(Mockito.any(RetryingCallable.class),Mockito.anyInt()); InOrder inOrder=Mockito.inOrder(caller); scanner.loadCache(); inOrder.verify(caller,Mockito.times(2)).callWithoutRetries(Mockito.any(RetryingCallable.class),Mockito.anyInt()); assertEquals(1,scanner.cache.size()); Result r=scanner.cache.poll(); assertNotNull(r); CellScanner cs=r.cellScanner(); assertTrue(cs.advance()); assertEquals(kv1,cs.current()); assertFalse(cs.advance()); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * With caching=1 but a large size limit, a single server batch of three rows must
 * all land in the scanner cache in one loadCache() pass, preserving row order.
 */
@Test
@SuppressWarnings("unchecked")
public void testCacheLimit() throws IOException {
  KeyValue kv1 = new KeyValue("row1".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum),
      kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum),
      kv3 = new KeyValue("row3".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum);
  final Result[] results = new Result[] { Result.create(new Cell[] { kv1 }),
      Result.create(new Cell[] { kv2 }), Result.create(new Cell[] { kv3 }) };
  // Typed caller/Answer instead of the raw types in the original; the duplicated
  // rpcFactory.newCaller() stubbing was removed (it was a no-op repeat).
  RpcRetryingCaller<Result[]> caller = Mockito.mock(RpcRetryingCaller.class);
  Mockito.when(rpcFactory.newCaller()).thenReturn(caller);
  Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class), Mockito.anyInt()))
      .thenAnswer(new Answer<Result[]>() {
        private int count = 0;

        @Override
        public Result[] answer(InvocationOnMock invocation) throws Throwable {
          ScannerCallableWithReplicas callable =
              invocation.getArgumentAt(0, ScannerCallableWithReplicas.class);
          switch (count) {
          case 0:
          case 2:
            // Lifecycle invocations deliver no rows.
            count++;
            return null;
          case 1:
            // One batch containing all three rows; server reports it is done.
            count++;
            callable.setHasMoreResultsContext(true);
            callable.setServerHasMoreResults(false);
            return results;
          default:
            throw new RuntimeException("Expected only 2 invocations");
          }
        }
      });
  scan.setCaching(1);
  scan.setMaxResultSize(1000 * 1000);
  try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf("table"),
      clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) {
    Mockito.verify(caller).callWithoutRetries(Mockito.any(RetryingCallable.class),
        Mockito.anyInt());
    InOrder inOrder = Mockito.inOrder(caller);
    scanner.loadCache();
    inOrder.verify(caller, Mockito.times(2)).callWithoutRetries(
        Mockito.any(RetryingCallable.class), Mockito.anyInt());
    assertEquals(3, scanner.cache.size());
    Result r = scanner.cache.poll();
    assertNotNull(r);
    CellScanner cs = r.cellScanner();
    assertTrue(cs.advance());
    assertEquals(kv1, cs.current());
    assertFalse(cs.advance());
    r = scanner.cache.poll();
    assertNotNull(r);
    cs = r.cellScanner();
    assertTrue(cs.advance());
    assertEquals(kv2, cs.current());
    assertFalse(cs.advance());
    r = scanner.cache.poll();
    assertNotNull(r);
    cs = r.cellScanner();
    assertTrue(cs.advance());
    assertEquals(kv3, cs.current());
    assertFalse(cs.advance());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test @SuppressWarnings("unchecked") public void testNoMoreResults() throws IOException { final Result[] results=new Result[1]; KeyValue kv1=new KeyValue("row".getBytes(),"cf".getBytes(),"cq".getBytes(),1,Type.Maximum); results[0]=Result.create(new Cell[]{kv1}); RpcRetryingCaller caller=Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory.newCaller()).thenReturn(caller); Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class),Mockito.anyInt())).thenAnswer(new Answer(){ private int count=0; @Override public Result[] answer( InvocationOnMock invocation) throws Throwable { ScannerCallableWithReplicas callable=invocation.getArgumentAt(0,ScannerCallableWithReplicas.class); switch (count) { case 0: case 2: count++; return null; case 1: count++; callable.setHasMoreResultsContext(true); callable.setServerHasMoreResults(false); return results; default : throw new RuntimeException("Expected only 2 invocations"); } } } ); Mockito.when(rpcFactory.newCaller()).thenReturn(caller); scan.setCaching(100); scan.setMaxResultSize(1000 * 1000); try (MockClientScanner scanner=new MockClientScanner(conf,scan,TableName.valueOf("table"),clusterConn,rpcFactory,controllerFactory,pool,Integer.MAX_VALUE)){ Mockito.verify(caller).callWithoutRetries(Mockito.any(RetryingCallable.class),Mockito.anyInt()); scanner.setRpcFinished(true); InOrder inOrder=Mockito.inOrder(caller); scanner.loadCache(); inOrder.verify(caller,Mockito.times(2)).callWithoutRetries(Mockito.any(RetryingCallable.class),Mockito.anyInt()); assertEquals(1,scanner.cache.size()); Result r=scanner.cache.poll(); assertNotNull(r); CellScanner cs=r.cellScanner(); assertTrue(cs.advance()); assertEquals(kv1,cs.current()); assertFalse(cs.advance()); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * When the server reports more results after the first batch, the first loadCache()
 * stops with one cached result; a second loadCache() (with the RPC marked finished)
 * fetches the remaining batch and the final call.
 */
@Test
@SuppressWarnings("unchecked")
public void testMoreResults() throws IOException {
  final Result[] results1 = new Result[1];
  KeyValue kv1 = new KeyValue("row".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum);
  results1[0] = Result.create(new Cell[] { kv1 });
  final Result[] results2 = new Result[1];
  KeyValue kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum);
  results2[0] = Result.create(new Cell[] { kv2 });
  // Typed caller/Answer instead of the raw types in the original.
  RpcRetryingCaller<Result[]> caller = Mockito.mock(RpcRetryingCaller.class);
  Mockito.when(rpcFactory.newCaller()).thenReturn(caller);
  Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class), Mockito.anyInt()))
      .thenAnswer(new Answer<Result[]>() {
        private int count = 0;

        @Override
        public Result[] answer(InvocationOnMock invocation) throws Throwable {
          ScannerCallableWithReplicas callable =
              invocation.getArgumentAt(0, ScannerCallableWithReplicas.class);
          switch (count) {
          case 0:
          case 3:
            // Lifecycle invocations deliver no rows.
            count++;
            return null;
          case 1:
            // First batch; server still has more.
            count++;
            callable.setHasMoreResultsContext(true);
            callable.setServerHasMoreResults(true);
            return results1;
          case 2:
            // Second batch; server is done.
            count++;
            callable.setHasMoreResultsContext(true);
            callable.setServerHasMoreResults(false);
            return results2;
          default:
            throw new RuntimeException("Expected only 2 invocations");
          }
        }
      });
  scan.setCaching(100);
  scan.setMaxResultSize(1000 * 1000);
  try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf("table"),
      clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) {
    Mockito.verify(caller).callWithoutRetries(Mockito.any(RetryingCallable.class),
        Mockito.anyInt());
    InOrder inOrder = Mockito.inOrder(caller);
    scanner.loadCache();
    inOrder.verify(caller, Mockito.times(2)).callWithoutRetries(
        Mockito.any(RetryingCallable.class), Mockito.anyInt());
    assertEquals(1, scanner.cache.size());
    Result r = scanner.cache.poll();
    assertNotNull(r);
    CellScanner cs = r.cellScanner();
    assertTrue(cs.advance());
    assertEquals(kv1, cs.current());
    assertFalse(cs.advance());
    scanner.setRpcFinished(true);
    inOrder = Mockito.inOrder(caller);
    scanner.loadCache();
    inOrder.verify(caller, Mockito.times(3)).callWithoutRetries(
        Mockito.any(RetryingCallable.class), Mockito.anyInt());
    r = scanner.cache.poll();
    assertNotNull(r);
    cs = r.cellScanner();
    assertTrue(cs.advance());
    assertEquals(kv2, cs.current());
    assertFalse(cs.advance());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test @SuppressWarnings("unchecked") public void testNoResultsHint() throws IOException { final Result[] results=new Result[1]; KeyValue kv1=new KeyValue("row".getBytes(),"cf".getBytes(),"cq".getBytes(),1,Type.Maximum); results[0]=Result.create(new Cell[]{kv1}); RpcRetryingCaller caller=Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory.newCaller()).thenReturn(caller); Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class),Mockito.anyInt())).thenAnswer(new Answer(){ private int count=0; @Override public Result[] answer( InvocationOnMock invocation) throws Throwable { ScannerCallableWithReplicas callable=invocation.getArgumentAt(0,ScannerCallableWithReplicas.class); switch (count) { case 0: case 2: count++; return null; case 1: count++; callable.setHasMoreResultsContext(false); return results; default : throw new RuntimeException("Expected only 2 invocations"); } } } ); scan.setCaching(100); scan.setMaxResultSize(1000 * 1000); try (MockClientScanner scanner=new MockClientScanner(conf,scan,TableName.valueOf("table"),clusterConn,rpcFactory,controllerFactory,pool,Integer.MAX_VALUE)){ scanner.setRpcFinished(true); InOrder inOrder=Mockito.inOrder(caller); scanner.loadCache(); inOrder.verify(caller,Mockito.times(2)).callWithoutRetries(Mockito.any(RetryingCallable.class),Mockito.anyInt()); assertEquals(1,scanner.cache.size()); Result r=scanner.cache.poll(); assertNotNull(r); CellScanner cs=r.cellScanner(); assertTrue(cs.advance()); assertEquals(kv1,cs.current()); assertFalse(cs.advance()); } }

Class: org.apache.hadoop.hbase.client.TestClientScannerRPCTimeout

InternalCallVerifier BooleanVerifier 
@Test public void testScannerNextRPCTimesout() throws Exception { final TableName TABLE_NAME=TableName.valueOf("testScannerNextRPCTimesout"); Table ht=TEST_UTIL.createTable(TABLE_NAME,FAMILY); byte[] r1=Bytes.toBytes("row-1"); byte[] r2=Bytes.toBytes("row-2"); byte[] r3=Bytes.toBytes("row-3"); putToTable(ht,r1); putToTable(ht,r2); putToTable(ht,r3); LOG.info("Wrote our three values"); RSRpcServicesWithScanTimeout.seqNoToSleepOn=1; Scan scan=new Scan(); scan.setCaching(1); ResultScanner scanner=ht.getScanner(scan); Result result=scanner.next(); assertTrue("Expected row: row-1",Bytes.equals(r1,result.getRow())); LOG.info("Got expected first row"); long t1=System.currentTimeMillis(); result=scanner.next(); assertTrue((System.currentTimeMillis() - t1) > rpcTimeout); assertTrue("Expected row: row-2",Bytes.equals(r2,result.getRow())); RSRpcServicesWithScanTimeout.seqNoToSleepOn=-1; result=scanner.next(); assertTrue("Expected row: row-3",Bytes.equals(r3,result.getRow())); scanner.close(); scanner=ht.getScanner(scan); RSRpcServicesWithScanTimeout.sleepAlways=true; RSRpcServicesWithScanTimeout.tryNumber=0; try { result=scanner.next(); } catch ( IOException ioe) { LOG.info("Failed after maximal attempts=" + CLIENT_RETRIES_NUMBER,ioe); } assertTrue("Expected maximal try number=" + CLIENT_RETRIES_NUMBER + ", actual ="+ RSRpcServicesWithScanTimeout.tryNumber,RSRpcServicesWithScanTimeout.tryNumber <= CLIENT_RETRIES_NUMBER); }

Class: org.apache.hadoop.hbase.client.TestClientSmallReversedScanner

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testNoContextNoRecords() throws Exception { ScannerCallableWithReplicas callableWithReplicas=Mockito.mock(ScannerCallableWithReplicas.class); scan.setCaching(2); SmallScannerCallableFactory factory=getFactory(callableWithReplicas); try (ClientSmallReversedScanner csrs=new ClientSmallReversedScanner(conf,scan,TableName.valueOf("table"),clusterConn,rpcFactory,controllerFactory,pool,Integer.MAX_VALUE)){ csrs.setScannerCallableFactory(factory); Mockito.when(caller.callWithoutRetries(callableWithReplicas,csrs.getScannerTimeout())).thenReturn(new Result[0]); Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(false); Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenThrow(new RuntimeException("Should not be called")); HRegionInfo regionInfo=Mockito.mock(HRegionInfo.class); Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); csrs.loadCache(); assertEquals(0,csrs.cache.size()); assertTrue(csrs.closed); } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Reversed small scan where the server supplies a "has more results" context: three
 * rows arrive over two batches and loadCache() gathers all of them (rows iterate as
 * row3, row2, row1) before closing the scanner.
 */
@Test
public void testContextPresent() throws Exception {
  final KeyValue kv1 =
      new KeyValue("row1".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum);
  final KeyValue kv2 =
      new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum);
  final KeyValue kv3 =
      new KeyValue("row3".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum);
  ScannerCallableWithReplicas callableWithReplicas =
      Mockito.mock(ScannerCallableWithReplicas.class);
  // Typed caller/Answer/Queue/Iterator instead of the raw types in the original
  // (the raw Iterator made `Result result = iter.next()` a compile error).
  @SuppressWarnings("unchecked")
  RpcRetryingCaller<Result[]> caller = Mockito.mock(RpcRetryingCaller.class);
  Mockito.when(rpcFactory.newCaller()).thenReturn(caller);
  SmallScannerCallableFactory factory = getFactory(callableWithReplicas);
  try (ClientSmallReversedScanner csrs = new ClientSmallReversedScanner(conf, scan,
      TableName.valueOf("table"), clusterConn, rpcFactory, controllerFactory, pool,
      Integer.MAX_VALUE)) {
    csrs.setScannerCallableFactory(factory);
    Mockito.when(caller.callWithoutRetries(callableWithReplicas, csrs.getScannerTimeout()))
        .thenAnswer(new Answer<Result[]>() {
          int count = 0;

          @Override
          public Result[] answer(InvocationOnMock invocation) {
            Result[] results;
            if (0 == count) {
              results = new Result[] { Result.create(new Cell[] { kv3 }),
                  Result.create(new Cell[] { kv2 }) };
            } else if (1 == count) {
              results = new Result[] { Result.create(new Cell[] { kv1 }) };
            } else {
              results = new Result[0];
            }
            count++;
            return results;
          }
        });
    Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(true);
    Mockito.when(callableWithReplicas.getServerHasMoreResults())
        .thenAnswer(createTrueThenFalseAnswer());
    HRegionInfo regionInfo = Mockito.mock(HRegionInfo.class);
    Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo);
    Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY);
    csrs.loadCache();
    Queue<Result> results = csrs.cache;
    Iterator<Result> iter = results.iterator();
    assertEquals(3, results.size());
    for (int i = 3; i >= 1 && iter.hasNext(); i--) {
      Result result = iter.next();
      byte[] row = result.getRow();
      assertEquals("row" + i, new String(row, StandardCharsets.UTF_8));
      assertEquals(1, result.getMap().size());
    }
    assertTrue(csrs.closed);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Reversed small scan without server context: successive loadCache() calls pull a
 * full 2-row batch and then the final short batch ("row1"); asking the server for
 * "more results" or a third batch would be an error.
 */
@Test
public void testNoContextFewerRecords() throws Exception {
  final KeyValue kv1 =
      new KeyValue("row1".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum);
  final KeyValue kv2 =
      new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum);
  final KeyValue kv3 =
      new KeyValue("row3".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, Type.Maximum);
  ScannerCallableWithReplicas callableWithReplicas =
      Mockito.mock(ScannerCallableWithReplicas.class);
  scan.setCaching(2);
  SmallScannerCallableFactory factory = getFactory(callableWithReplicas);
  try (ClientSmallReversedScanner csrs = new ClientSmallReversedScanner(conf, scan,
      TableName.valueOf("table"), clusterConn, rpcFactory, controllerFactory, pool,
      Integer.MAX_VALUE)) {
    csrs.setScannerCallableFactory(factory);
    // Typed Answer/Queue/Iterator instead of the raw types in the original (the raw
    // Queue made `Result result = results.peek()` a compile error).
    Mockito.when(caller.callWithoutRetries(callableWithReplicas, csrs.getScannerTimeout()))
        .thenAnswer(new Answer<Result[]>() {
          int count = 0;

          @Override
          public Result[] answer(InvocationOnMock invocation) {
            Result[] results;
            if (0 == count) {
              results = new Result[] { Result.create(new Cell[] { kv3 }),
                  Result.create(new Cell[] { kv2 }) };
            } else if (1 == count) {
              results = new Result[] { Result.create(new Cell[] { kv1 }) };
            } else {
              throw new RuntimeException("Should not fetch a third batch from the server");
            }
            count++;
            return results;
          }
        });
    Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(false);
    Mockito.when(callableWithReplicas.getServerHasMoreResults())
        .thenThrow(new RuntimeException("Should not be called"));
    HRegionInfo regionInfo = Mockito.mock(HRegionInfo.class);
    Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo);
    Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY);
    csrs.loadCache();
    Queue<Result> results = csrs.cache;
    Iterator<Result> iter = results.iterator();
    assertEquals(2, results.size());
    for (int i = 3; i >= 2 && iter.hasNext(); i--) {
      Result result = iter.next();
      byte[] row = result.getRow();
      assertEquals("row" + i, new String(row, StandardCharsets.UTF_8));
      assertEquals(1, result.getMap().size());
    }
    results.clear();
    csrs.loadCache();
    assertEquals(1, results.size());
    Result result = results.peek();
    assertEquals("row1", new String(result.getRow(), StandardCharsets.UTF_8));
    assertEquals(1, result.getMap().size());
    assertTrue(csrs.closed);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testContextNoRecords() throws Exception { ScannerCallableWithReplicas callableWithReplicas=Mockito.mock(ScannerCallableWithReplicas.class); SmallScannerCallableFactory factory=getFactory(callableWithReplicas); try (ClientSmallReversedScanner csrs=new ClientSmallReversedScanner(conf,scan,TableName.valueOf("table"),clusterConn,rpcFactory,controllerFactory,pool,Integer.MAX_VALUE)){ csrs.setScannerCallableFactory(factory); Mockito.when(caller.callWithoutRetries(callableWithReplicas,csrs.getScannerTimeout())).thenReturn(new Result[0]); Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(true); Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenReturn(false); HRegionInfo regionInfo=Mockito.mock(HRegionInfo.class); Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); csrs.loadCache(); assertEquals(0,csrs.cache.size()); assertTrue(csrs.closed); } }

Class: org.apache.hadoop.hbase.client.TestClientSmallScanner

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testNoContextNoRecords() throws Exception { ScannerCallableWithReplicas callableWithReplicas=Mockito.mock(ScannerCallableWithReplicas.class); scan.setCaching(2); SmallScannerCallableFactory factory=getFactory(callableWithReplicas); try (ClientSmallScanner css=new ClientSmallScanner(conf,scan,TableName.valueOf("table"),clusterConn,rpcFactory,controllerFactory,pool,Integer.MAX_VALUE)){ css.setScannerCallableFactory(factory); Mockito.when(caller.callWithoutRetries(callableWithReplicas,css.getScannerTimeout())).thenReturn(new Result[0]); Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(false); Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenThrow(new RuntimeException("Should not be called")); HRegionInfo regionInfo=Mockito.mock(HRegionInfo.class); Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); css.loadCache(); assertEquals(0,css.cache.size()); assertTrue(css.closed); } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * No results context, caching=2. The stubbed caller serves two batches: a full batch of
 * 2 rows (row1, row2), then a short batch of 1 row (row3) — fewer than caching, which is
 * the legacy heuristic for "region exhausted". A third server fetch would throw.
 * First loadCache() must buffer exactly the 2-row batch; after draining, the second
 * loadCache() fetches the final row and the scanner ends up closed.
 */
@Test public void testNoContextFewerRecords() throws Exception { final KeyValue kv1=new KeyValue("row1".getBytes(),"cf".getBytes(),"cq".getBytes(),1,Type.Maximum), kv2=new KeyValue("row2".getBytes(),"cf".getBytes(),"cq".getBytes(),1,Type.Maximum), kv3=new KeyValue("row3".getBytes(),"cf".getBytes(),"cq".getBytes(),1,Type.Maximum); ScannerCallableWithReplicas callableWithReplicas=Mockito.mock(ScannerCallableWithReplicas.class); scan.setCaching(2); SmallScannerCallableFactory factory=getFactory(callableWithReplicas); try (ClientSmallScanner css=new ClientSmallScanner(conf,scan,TableName.valueOf("table"),clusterConn,rpcFactory,controllerFactory,pool,Integer.MAX_VALUE)){ css.setScannerCallableFactory(factory); Mockito.when(caller.callWithoutRetries(callableWithReplicas,css.getScannerTimeout())).thenAnswer(new Answer(){ int count=0; @Override public Result[] answer( InvocationOnMock invocation){ Result[] results; if (0 == count) { results=new Result[]{Result.create(new Cell[]{kv1}),Result.create(new Cell[]{kv2})}; } else if (1 == count) { results=new Result[]{Result.create(new Cell[]{kv3})}; } else { throw new RuntimeException("Should not fetch a third batch from the server"); } count++; return results; } } ); Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(false); Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenThrow(new RuntimeException("Should not be called")); HRegionInfo regionInfo=Mockito.mock(HRegionInfo.class); Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); css.loadCache(); Queue results=css.cache; assertEquals(2,results.size()); for (int i=1; i <= 2; i++) { Result result=results.poll(); byte[] row=result.getRow(); assertEquals("row" + i,new String(row,StandardCharsets.UTF_8)); assertEquals(1,result.getMap().size()); } results.clear(); css.loadCache(); assertEquals(1,results.size()); Result result=results.peek(); 
assertEquals("row3",new String(result.getRow(),StandardCharsets.UTF_8)); assertEquals(1,result.getMap().size()); assertTrue(css.closed); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Forward small scanner, results-context present, zero rows (mirror of the reversed-scanner
 * case): hasMoreResultsContext()=true with getServerHasMoreResults()=false means the empty
 * batch is authoritative — loadCache() must leave the cache empty and close the scanner.
 */
@Test public void testContextNoRecords() throws Exception { ScannerCallableWithReplicas callableWithReplicas=Mockito.mock(ScannerCallableWithReplicas.class); SmallScannerCallableFactory factory=getFactory(callableWithReplicas); try (ClientSmallScanner css=new ClientSmallScanner(conf,scan,TableName.valueOf("table"),clusterConn,rpcFactory,controllerFactory,pool,Integer.MAX_VALUE)){ css.setScannerCallableFactory(factory); Mockito.when(caller.callWithoutRetries(callableWithReplicas,css.getScannerTimeout())).thenReturn(new Result[0]); Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(true); Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenReturn(false); HRegionInfo regionInfo=Mockito.mock(HRegionInfo.class); Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); css.loadCache(); assertEquals(0,css.cache.size()); assertTrue(css.closed); } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Results context present across multiple batches. The server reports "more results" true
 * for the first two fetches and false afterwards (createTrueThenFalseAnswer), so a single
 * loadCache() call must keep fetching and accumulate all 3 rows (2 + 1) before closing.
 * Note this test re-stubs rpcFactory.newCaller() with a locally mocked RpcRetryingCaller.
 */
@Test public void testContextPresent() throws Exception { final KeyValue kv1=new KeyValue("row1".getBytes(),"cf".getBytes(),"cq".getBytes(),1,Type.Maximum), kv2=new KeyValue("row2".getBytes(),"cf".getBytes(),"cq".getBytes(),1,Type.Maximum), kv3=new KeyValue("row3".getBytes(),"cf".getBytes(),"cq".getBytes(),1,Type.Maximum); ScannerCallableWithReplicas callableWithReplicas=Mockito.mock(ScannerCallableWithReplicas.class); @SuppressWarnings("unchecked") RpcRetryingCaller caller=Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory.newCaller()).thenReturn(caller); SmallScannerCallableFactory factory=getFactory(callableWithReplicas); try (ClientSmallScanner css=new ClientSmallScanner(conf,scan,TableName.valueOf("table"),clusterConn,rpcFactory,controllerFactory,pool,Integer.MAX_VALUE)){ css.setScannerCallableFactory(factory); Mockito.when(caller.callWithoutRetries(callableWithReplicas,css.getScannerTimeout())).thenAnswer(new Answer(){ int count=0; @Override public Result[] answer( InvocationOnMock invocation){ Result[] results; if (0 == count) { results=new Result[]{Result.create(new Cell[]{kv1}),Result.create(new Cell[]{kv2})}; } else if (1 == count) { results=new Result[]{Result.create(new Cell[]{kv3})}; } else { results=new Result[0]; } count++; return results; } } ); Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(true); Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenAnswer(createTrueThenFalseAnswer()); HRegionInfo regionInfo=Mockito.mock(HRegionInfo.class); Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); css.loadCache(); Queue results=css.cache; assertEquals(3,results.size()); for (int i=1; i <= 3; i++) { Result result=results.poll(); byte[] row=result.getRow(); assertEquals("row" + i,new String(row,StandardCharsets.UTF_8)); assertEquals(1,result.getMap().size()); } assertTrue(css.closed); } }

Class: org.apache.hadoop.hbase.client.TestFromClientSide

InternalCallVerifier EqualityVerifier 
/**
 * Verifies addFamilyVersion interacting with other delete types. ROW gets a family-level
 * delete up to ts[1], a family-version delete at ts[3], column deletes and a single-version
 * delete — each separated by a flush so deletes land in distinct store files. ROW2 gets
 * ONLY two family-version deletes (ts[1], ts[3]). The per-qualifier gets then assert exactly
 * which timestamps survive: for ROW2 every qualifier must retain ts[0], ts[2], ts[4].
 */
@Test public void testDeleteFamilyVersionWithOtherDeletes() throws Exception { TableName TABLE=TableName.valueOf("testDeleteFamilyVersionWithOtherDeletes"); byte[][] QUALIFIERS=makeNAscii(QUALIFIER,5); byte[][] VALUES=makeN(VALUE,5); long[] ts={1000,2000,3000,4000,5000}; Admin admin=TEST_UTIL.getHBaseAdmin(); Table ht=TEST_UTIL.createTable(TABLE,FAMILY,5); Put put=null; Result result=null; Get get=null; Delete delete=null; put=new Put(ROW); for (int q=0; q < 5; q++) for (int t=0; t < 5; t++) put.addColumn(FAMILY,QUALIFIERS[q],ts[t],VALUES[t]); ht.put(put); admin.flush(TABLE); byte[] ROW2=Bytes.toBytes("myRowForTest"); put=new Put(ROW2); for (int q=0; q < 5; q++) for (int t=0; t < 5; t++) put.addColumn(FAMILY,QUALIFIERS[q],ts[t],VALUES[t]); ht.put(put); admin.flush(TABLE); delete=new Delete(ROW); delete.addFamily(FAMILY,ts[1]); delete.addFamilyVersion(FAMILY,ts[3]); delete.addColumns(FAMILY,QUALIFIERS[0],ts[2]); delete.addColumns(FAMILY,QUALIFIERS[2],ts[4]); delete.addColumn(FAMILY,QUALIFIERS[4],ts[4]); ht.delete(delete); admin.flush(TABLE); delete=new Delete(ROW2); delete.addFamilyVersion(FAMILY,ts[1]); delete.addFamilyVersion(FAMILY,ts[3]); ht.delete(delete); admin.flush(TABLE); get=new Get(ROW); get.addColumn(FAMILY,QUALIFIERS[0]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILY,QUALIFIERS[0],new long[]{ts[4]},new byte[][]{VALUES[4]},0,0); get=new Get(ROW); get.addColumn(FAMILY,QUALIFIERS[1]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILY,QUALIFIERS[1],new long[]{ts[2],ts[4]},new byte[][]{VALUES[2],VALUES[4]},0,1); get=new Get(ROW); get.addColumn(FAMILY,QUALIFIERS[2]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertEquals(0,result.size()); get=new Get(ROW); get.addColumn(FAMILY,QUALIFIERS[3]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILY,QUALIFIERS[3],new long[]{ts[2],ts[4]},new byte[][]{VALUES[2],VALUES[4]},0,1); 
get=new Get(ROW); get.addColumn(FAMILY,QUALIFIERS[4]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILY,QUALIFIERS[4],new long[]{ts[2]},new byte[][]{VALUES[2]},0,0); for (int i=0; i < 5; i++) { get=new Get(ROW2); get.addColumn(FAMILY,QUALIFIERS[i]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW2,FAMILY,QUALIFIERS[i],new long[]{ts[0],ts[2],ts[4]},new byte[][]{VALUES[0],VALUES[2],VALUES[4]},0,2); } ht.close(); admin.close(); }

InternalCallVerifier EqualityVerifier 
/**
 * Reversed scan with {@code KeyOnlyFilter(lenAsVal=true)}: each returned cell's value must
 * be replaced by the length of the original value, encoded as a 4-byte int, and all 10 rows
 * must still be seen.
 * <p>
 * Fix: the original opened {@code scanner} but iterated a SECOND scanner obtained from
 * {@code ht.getScanner(scan)} inside the for-each, leaking that second scanner (it was never
 * closed) while closing the unused first one. We now iterate the scanner we opened.
 */
@Test
public void testKeyOnlyFilterWithReverseScan() throws Exception {
  TableName TABLE = TableName.valueOf("testKeyOnlyFilterWithReverseScan");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte[][] ROWS = makeN(ROW, 10);
  byte[][] QUALIFIERS = { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"),
      Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), Bytes.toBytes("col4--"),
      Bytes.toBytes("col5--"), Bytes.toBytes("col6--"), Bytes.toBytes("col7--"),
      Bytes.toBytes("col8--"), Bytes.toBytes("col9--") };
  // One cell per row: (ROWS[i], FAMILY:QUALIFIERS[i]) = VALUE.
  for (int i = 0; i < 10; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(FAMILY, QUALIFIERS[i], VALUE);
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.setReversed(true);
  scan.addFamily(FAMILY);
  // lenAsVal=true makes the filter emit the value length (an int) as the cell value.
  Filter filter = new KeyOnlyFilter(true);
  scan.setFilter(filter);
  ResultScanner scanner = ht.getScanner(scan);
  int count = 0;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    assertEquals(result.rawCells()[0].getValueLength(), Bytes.SIZEOF_INT);
    assertEquals(Bytes.toInt(CellUtil.cloneValue(result.rawCells()[0])), VALUE.length);
    count++;
  }
  assertEquals(count, 10);
  scanner.close();
  ht.close();
}

IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * getStartEndKeys must be computed from primary regions only: for replication factors 1-3
 * it builds RegionLocations with that many replicas per region and asserts the start/end
 * key arrays still have exactly KEYS.length + 1 entries matching the split boundaries.
 * <p>
 * Fix: replaced the raw {@code List}/{@code ArrayList}/{@code Pair} types with proper
 * generics — with a raw {@code Pair}, {@code getFirst()} returns {@code Object} and the
 * {@code .length} / array-index accesses below would not compile.
 */
@Test
public void testGetStartEndKeysWithRegionReplicas() throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testGetStartEndKeys"));
  HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
  htd.addFamily(fam);
  byte[][] KEYS = HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE;
  Admin admin = TEST_UTIL.getHBaseAdmin();
  admin.createTable(htd, KEYS);
  List<HRegionInfo> regions = admin.getTableRegions(htd.getTableName());
  HRegionLocator locator =
      (HRegionLocator) admin.getConnection().getRegionLocator(htd.getTableName());
  for (int regionReplication = 1; regionReplication < 4; regionReplication++) {
    // Build synthetic locations: each region gets `regionReplication` replica entries.
    List<RegionLocations> regionLocations = new ArrayList<RegionLocations>();
    for (HRegionInfo region : regions) {
      HRegionLocation[] arr = new HRegionLocation[regionReplication];
      for (int i = 0; i < arr.length; i++) {
        arr[i] = new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(region, i), null);
      }
      regionLocations.add(new RegionLocations(arr));
    }
    Pair<byte[][], byte[][]> startEndKeys = locator.getStartEndKeys(regionLocations);
    // Replicas must not inflate the key count: still one entry per primary region.
    assertEquals(KEYS.length + 1, startEndKeys.getFirst().length);
    for (int i = 0; i < KEYS.length + 1; i++) {
      byte[] startKey = i == 0 ? HConstants.EMPTY_START_ROW : KEYS[i - 1];
      byte[] endKey = i == KEYS.length ? HConstants.EMPTY_END_ROW : KEYS[i];
      assertArrayEquals(startKey, startEndKeys.getFirst()[i]);
      assertArrayEquals(endKey, startEndKeys.getSecond()[i]);
    }
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * A Get with a null qualifier must match only the cell stored under the empty/null
 * qualifier (1 cell), while addFamily returns both cells in the family.
 * <p>
 * Fix: the original never closed the table; close it in a finally block so the
 * resource is released even when an assertion fails.
 */
@Test
public void testGet_NullQualifier() throws IOException {
  Table table = TEST_UTIL.createTable(TableName.valueOf("testGet_NullQualifier"), FAMILY);
  try {
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
    // Second cell in the same row, stored under the null qualifier.
    put = new Put(ROW);
    put.addColumn(FAMILY, null, VALUE);
    table.put(put);
    LOG.info("Row put");
    Get get = new Get(ROW);
    get.addColumn(FAMILY, null);
    Result r = table.get(get);
    assertEquals(1, r.size());
    get = new Get(ROW);
    get.addFamily(FAMILY);
    r = table.get(get);
    assertEquals(2, r.size());
  } finally {
    table.close();
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Grab-bag of HTable behaviors: (1) copies every cell of table A into table B via a
 * scan-then-put loop (SKIP_WAL); (2) re-opens table A through fresh Table instances and
 * reads it back; (3) round-trips a descriptor change — disable A, set a custom attribute
 * on the HTD and on every HCD, modifyTable, re-enable — then fetches the descriptor again
 * and asserts the attribute value survived on both table and column-family level.
 */
@Test public void testMiscHTableStuff() throws IOException { final TableName tableAname=TableName.valueOf("testMiscHTableStuffA"); final TableName tableBname=TableName.valueOf("testMiscHTableStuffB"); final byte[] attrName=Bytes.toBytes("TESTATTR"); final byte[] attrValue=Bytes.toBytes("somevalue"); byte[] value=Bytes.toBytes("value"); Table a=TEST_UTIL.createTable(tableAname,HConstants.CATALOG_FAMILY); Table b=TEST_UTIL.createTable(tableBname,HConstants.CATALOG_FAMILY); Put put=new Put(ROW); put.addColumn(HConstants.CATALOG_FAMILY,null,value); a.put(put); Table newA=TEST_UTIL.getConnection().getTable(tableAname); Scan scan=new Scan(); scan.addFamily(HConstants.CATALOG_FAMILY); ResultScanner s=newA.getScanner(scan); try { for ( Result r : s) { put=new Put(r.getRow()); put.setDurability(Durability.SKIP_WAL); for ( Cell kv : r.rawCells()) { put.add(kv); } b.put(put); } } finally { s.close(); } Table anotherA=TEST_UTIL.getConnection().getTable(tableAname); Get get=new Get(ROW); get.addFamily(HConstants.CATALOG_FAMILY); anotherA.get(get); Admin admin=TEST_UTIL.getHBaseAdmin(); HTableDescriptor desc=new HTableDescriptor(a.getTableDescriptor()); admin.disableTable(tableAname); desc.setValue(attrName,attrValue); for ( HColumnDescriptor c : desc.getFamilies()) c.setValue(attrName,attrValue); admin.modifyTable(tableAname,desc); admin.enableTable(tableAname); desc=a.getTableDescriptor(); assertEquals("wrong table descriptor returned",desc.getTableName(),tableAname); value=desc.getValue(attrName); assertFalse("missing HTD attribute value",value == null); assertFalse("HTD attribute value is incorrect",Bytes.compareTo(value,attrValue) != 0); for ( HColumnDescriptor c : desc.getFamilies()) { value=c.getValue(attrName); assertFalse("missing HCD attribute value",value == null); assertFalse("HCD attribute value is incorrect",Bytes.compareTo(value,attrValue) != 0); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Basic client side validation of HBASE-4536 (KEEP_DELETED_CELLS).
 * With KeepDeletedCells.TRUE on the family, puts at ts, ts+2, ts+4 followed by a
 * family delete and a column delete at ts+3 must still be readable through a
 * time-range get/scan bounded below ts+3 (T2 is the newest surviving version).
 * A raw scan additionally exposes the delete markers themselves: family-delete
 * marker first, then T3, the column-delete marker, T2 and T1.
 */ @Test public void testKeepDeletedCells() throws Exception { final TableName TABLENAME=TableName.valueOf("testKeepDeletesCells"); final byte[] FAMILY=Bytes.toBytes("family"); final byte[] C0=Bytes.toBytes("c0"); final byte[] T1=Bytes.toBytes("T1"); final byte[] T2=Bytes.toBytes("T2"); final byte[] T3=Bytes.toBytes("T3"); HColumnDescriptor hcd=new HColumnDescriptor(FAMILY).setKeepDeletedCells(KeepDeletedCells.TRUE).setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE).setMaxVersions(3); HTableDescriptor desc=new HTableDescriptor(TABLENAME); desc.addFamily(hcd); TEST_UTIL.getHBaseAdmin().createTable(desc); Table h=TEST_UTIL.getConnection().getTable(TABLENAME); long ts=System.currentTimeMillis(); Put p=new Put(T1,ts); p.addColumn(FAMILY,C0,T1); h.put(p); p=new Put(T1,ts + 2); p.addColumn(FAMILY,C0,T2); h.put(p); p=new Put(T1,ts + 4); p.addColumn(FAMILY,C0,T3); h.put(p); Delete d=new Delete(T1,ts + 3); h.delete(d); d=new Delete(T1,ts + 3); d.addColumns(FAMILY,C0,ts + 3); h.delete(d); Get g=new Get(T1); g.setTimeRange(0,ts + 3); Result r=h.get(g); assertArrayEquals(T2,r.getValue(FAMILY,C0)); Scan s=new Scan(T1); s.setTimeRange(0,ts + 3); s.setMaxVersions(); ResultScanner scanner=h.getScanner(s); Cell[] kvs=scanner.next().rawCells(); assertArrayEquals(T2,CellUtil.cloneValue(kvs[0])); assertArrayEquals(T1,CellUtil.cloneValue(kvs[1])); scanner.close(); s=new Scan(T1); s.setRaw(true); s.setMaxVersions(); scanner=h.getScanner(s); kvs=scanner.next().rawCells(); assertTrue(CellUtil.isDeleteFamily(kvs[0])); assertArrayEquals(T3,CellUtil.cloneValue(kvs[1])); assertTrue(CellUtil.isDelete(kvs[2])); assertArrayEquals(T2,CellUtil.cloneValue(kvs[3])); assertArrayEquals(T1,CellUtil.cloneValue(kvs[4])); scanner.close(); h.close(); }

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Tests reversed scan under multi regions. Splits the table on keys that include
 * MAX_BYTE_ARRAY-derived boundaries, writes one row per split key, then verifies a
 * forward scan sees all rows and a reversed scan sees them all in strictly descending
 * row order.
 * <p>
 * Fix: the original leaked both ResultScanners — the forward scanner was reassigned
 * without being closed and the reversed scanner was never closed. Both are now closed.
 */
@Test
public void testReversedScanUnderMultiRegions() throws Exception {
  TableName TABLE = TableName.valueOf("testReversedScanUnderMultiRegions");
  byte[] maxByteArray = ReversedClientScanner.MAX_BYTE_ARRAY;
  // Split points deliberately include keys padded with repeated MAX_BYTE_ARRAY blocks
  // to exercise the reversed scanner's seek-to-previous-region logic near region edges.
  byte[][] splitRows = new byte[][] { Bytes.toBytes("005"),
      Bytes.add(Bytes.toBytes("005"), Bytes.multiple(maxByteArray, 16)), Bytes.toBytes("006"),
      Bytes.add(Bytes.toBytes("006"), Bytes.multiple(maxByteArray, 8)), Bytes.toBytes("007"),
      Bytes.add(Bytes.toBytes("007"), Bytes.multiple(maxByteArray, 4)), Bytes.toBytes("008"),
      Bytes.multiple(maxByteArray, 2) };
  Table table = TEST_UTIL.createTable(TABLE, FAMILY, splitRows);
  TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
  try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE)) {
    assertEquals(splitRows.length + 1, l.getAllRegionLocations().size());
  }
  // One row per split key.
  int insertNum = splitRows.length;
  for (int i = 0; i < insertNum; i++) {
    Put put = new Put(splitRows[i]);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
  }
  // Forward scan: every inserted row must come back non-empty.
  ResultScanner scanner = table.getScanner(new Scan());
  int count = 0;
  for (Result r : scanner) {
    assertTrue(!r.isEmpty());
    count++;
  }
  scanner.close();
  assertEquals(insertNum, count);
  // Reversed scan: same row count, and rows must arrive in strictly descending order.
  Scan scan = new Scan();
  scan.setReversed(true);
  scanner = table.getScanner(scan);
  count = 0;
  byte[] lastRow = null;
  for (Result r : scanner) {
    assertTrue(!r.isEmpty());
    count++;
    byte[] thisRow = r.getRow();
    if (lastRow != null) {
      assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row="
          + Bytes.toString(thisRow), Bytes.compareTo(thisRow, lastRow) < 0);
    }
    lastRow = thisRow;
  }
  scanner.close();
  assertEquals(insertNum, count);
  table.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * QualifierFilter with regex "col[1-5]" must return exactly rows 1..5 (one cell each)
 * in ascending order, so the loop ends with expectedIndex == 6.
 * <p>
 * Fix: the original opened {@code scanner} but iterated a SECOND scanner from
 * {@code ht.getScanner(scan)}, leaking it while closing the unused first one. We now
 * iterate the scanner we opened, and also close the table.
 */
@Test
public void testFilters() throws Exception {
  TableName TABLE = TableName.valueOf("testFilters");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte[][] ROWS = makeN(ROW, 10);
  byte[][] QUALIFIERS = { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"),
      Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), Bytes.toBytes("col4--"),
      Bytes.toBytes("col5--"), Bytes.toBytes("col6--"), Bytes.toBytes("col7--"),
      Bytes.toBytes("col8--"), Bytes.toBytes("col9--") };
  // One cell per row; SKIP_WAL since durability is irrelevant to this test.
  for (int i = 0; i < 10; i++) {
    Put put = new Put(ROWS[i]);
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY, QUALIFIERS[i], VALUE);
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  Filter filter = new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator("col[1-5]"));
  scan.setFilter(filter);
  ResultScanner scanner = ht.getScanner(scan);
  int expectedIndex = 1;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]), ROWS[expectedIndex]));
    assertTrue(
        Bytes.equals(CellUtil.cloneQualifier(result.rawCells()[0]), QUALIFIERS[expectedIndex]));
    expectedIndex++;
  }
  assertEquals(expectedIndex, 6);
  scanner.close();
  ht.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that cache on write works all the way up from the client-side.
 * Performs inserts, flushes, and compactions, verifying changes in the block
 * cache along the way: put (no cache change), flush (+1 block), read (+1 hit),
 * second put/read (+1 hit), second flush (+1 block), major compaction to one
 * store file (-2 blocks, +2 hits from the compactor's reads), then a fresh read
 * that caches the compacted file's block (+1 block, +1 miss).
 * The initial spin loop waits for the cache counters to go quiet before sampling
 * the baseline, restarting the 5-iteration countdown whenever a counter moves.
 * @throws Exception
 */ @Test public void testCacheOnWriteEvictOnClose() throws Exception { TableName tableName=TableName.valueOf("testCOWEOCfromClient"); byte[] data=Bytes.toBytes("data"); Table table=TEST_UTIL.createTable(tableName,FAMILY); try (RegionLocator locator=TEST_UTIL.getConnection().getRegionLocator(tableName)){ String regionName=locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region=TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); Store store=region.getStores().iterator().next(); CacheConfig cacheConf=store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); cacheConf.setEvictOnClose(true); BlockCache cache=cacheConf.getBlockCache(); long startBlockCount=cache.getBlockCount(); long startBlockHits=cache.getStats().getHitCount(); long startBlockMiss=cache.getStats().getMissCount(); for (int i=0; i < 5; i++) { Thread.sleep(100); if (startBlockCount != cache.getBlockCount() || startBlockHits != cache.getStats().getHitCount() || startBlockMiss != cache.getStats().getMissCount()) { startBlockCount=cache.getBlockCount(); startBlockHits=cache.getStats().getHitCount(); startBlockMiss=cache.getStats().getMissCount(); i=-1; } } Put put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER,data); table.put(put); assertTrue(Bytes.equals(table.get(new Get(ROW)).value(),data)); assertEquals(startBlockCount,cache.getBlockCount()); assertEquals(startBlockHits,cache.getStats().getHitCount()); assertEquals(startBlockMiss,cache.getStats().getMissCount()); System.out.println("Flushing cache"); region.flush(true); long expectedBlockCount=startBlockCount + 1; long expectedBlockHits=startBlockHits; long expectedBlockMiss=startBlockMiss; assertEquals(expectedBlockCount,cache.getBlockCount()); 
assertEquals(expectedBlockHits,cache.getStats().getHitCount()); assertEquals(expectedBlockMiss,cache.getStats().getMissCount()); assertTrue(Bytes.equals(table.get(new Get(ROW)).value(),data)); assertEquals(expectedBlockCount,cache.getBlockCount()); assertEquals(++expectedBlockHits,cache.getStats().getHitCount()); assertEquals(expectedBlockMiss,cache.getStats().getMissCount()); byte[] QUALIFIER2=Bytes.add(QUALIFIER,QUALIFIER); byte[] data2=Bytes.add(data,data); put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER2,data2); table.put(put); Result r=table.get(new Get(ROW)); assertTrue(Bytes.equals(r.getValue(FAMILY,QUALIFIER),data)); assertTrue(Bytes.equals(r.getValue(FAMILY,QUALIFIER2),data2)); assertEquals(expectedBlockCount,cache.getBlockCount()); assertEquals(++expectedBlockHits,cache.getStats().getHitCount()); assertEquals(expectedBlockMiss,cache.getStats().getMissCount()); System.out.println("Flushing cache"); region.flush(true); assertEquals(++expectedBlockCount,cache.getBlockCount()); assertEquals(expectedBlockHits,cache.getStats().getHitCount()); assertEquals(expectedBlockMiss,cache.getStats().getMissCount()); System.out.println("Compacting"); assertEquals(2,store.getStorefilesCount()); store.triggerMajorCompaction(); region.compact(true); waitForStoreFileCount(store,1,10000); assertEquals(1,store.getStorefilesCount()); expectedBlockCount-=2; assertEquals(expectedBlockCount,cache.getBlockCount()); expectedBlockHits+=2; assertEquals(expectedBlockMiss,cache.getStats().getMissCount()); assertEquals(expectedBlockHits,cache.getStats().getHitCount()); r=table.get(new Get(ROW)); assertTrue(Bytes.equals(r.getValue(FAMILY,QUALIFIER),data)); assertTrue(Bytes.equals(r.getValue(FAMILY,QUALIFIER2),data2)); expectedBlockCount+=1; assertEquals(expectedBlockCount,cache.getBlockCount()); assertEquals(expectedBlockHits,cache.getStats().getHitCount()); assertEquals(++expectedBlockMiss,cache.getStats().getMissCount()); } }

InternalCallVerifier EqualityVerifier 
/**
 * HBASE-6912 regression: a scan over [1, 3) with a NOT_EQUAL(1) RowFilter must return
 * exactly one row (row 2) — the filter must not leak rows outside the stop row.
 * <p>
 * Fix: parameterized the raw {@code List} of Puts, and closed the scanner and table,
 * both of which the original leaked.
 */
@Test
public void testJira6912() throws Exception {
  TableName TABLE = TableName.valueOf("testJira6912");
  Table foo = TEST_UTIL.createTable(TABLE, new byte[][] { FAMILY }, 10);
  List<Put> puts = new ArrayList<Put>();
  for (int i = 0; i != 100; i++) {
    Put put = new Put(Bytes.toBytes(i));
    put.addColumn(FAMILY, FAMILY, Bytes.toBytes(i));
    puts.add(put);
  }
  foo.put(puts);
  // Flush so the scan reads from a store file rather than only the memstore.
  TEST_UTIL.flush();
  Scan scan = new Scan();
  scan.setStartRow(Bytes.toBytes(1));
  scan.setStopRow(Bytes.toBytes(3));
  scan.addColumn(FAMILY, FAMILY);
  scan.setFilter(
      new RowFilter(CompareFilter.CompareOp.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(1))));
  ResultScanner scanner = foo.getScanner(scan);
  Result[] bar = scanner.next(100);
  assertEquals(1, bar.length);
  scanner.close();
  foo.close();
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the round-robin client IPC pool (pool size 3): writes 2*poolSize versions of
 * one cell, reading the row back after each put, and asserts every version written so far
 * is visible with the value that was put — i.e. no connection in the pool serves stale data.
 * <p>
 * Fix: parameterized the raw {@code NavigableMap}/{@code Map.Entry} (the raw entrySet
 * iteration does not type-check against {@code Bytes.equals(byte[], byte[])}), and closed
 * the table when done.
 */
@Test
public void testClientPoolRoundRobin() throws IOException {
  final TableName tableName = TableName.valueOf("testClientPoolRoundRobin");
  int poolSize = 3;
  int numVersions = poolSize * 2;
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "round-robin");
  conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize);
  Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, Integer.MAX_VALUE);
  try {
    final long ts = EnvironmentEdgeManager.currentTime();
    Get get = new Get(ROW);
    get.addColumn(FAMILY, QUALIFIER);
    get.setMaxVersions();
    for (int versions = 1; versions <= numVersions; versions++) {
      Put put = new Put(ROW);
      put.addColumn(FAMILY, QUALIFIER, ts + versions, VALUE);
      table.put(put);
      Result result = table.get(get);
      NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER);
      assertEquals("The number of versions of '" + FAMILY + ":" + QUALIFIER + " did not match "
          + versions, versions, navigableMap.size());
      for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
        assertTrue("The value at time " + entry.getKey() + " did not match what was put",
            Bytes.equals(VALUE, entry.getValue()));
      }
    }
  } finally {
    table.close();
  }
}

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Atomic RowMutations coverage: a lone Put, then a Put combined with a Delete of the
 * previously written qualifier (new value visible, old one gone), and finally a Put
 * against an undeclared column family, which must fail with NoSuchColumnFamilyException.
 */
@Test
public void testRowMutation() throws Exception {
  LOG.info("Starting testRowMutation");
  final TableName TABLENAME = TableName.valueOf("testRowMutation");
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);
  byte[][] QUALIFIERS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b") };

  // Round 1: single put through mutateRow, then read it back.
  RowMutations mutations = new RowMutations(ROW);
  Put put = new Put(ROW);
  put.addColumn(FAMILY, QUALIFIERS[0], VALUE);
  mutations.add(put);
  table.mutateRow(mutations);

  Get get = new Get(ROW);
  Result result = table.get(get);
  assertEquals(0, Bytes.compareTo(VALUE, result.getValue(FAMILY, QUALIFIERS[0])));

  // Round 2: put qualifier b and delete qualifier a atomically.
  mutations = new RowMutations(ROW);
  put = new Put(ROW);
  put.addColumn(FAMILY, QUALIFIERS[1], VALUE);
  mutations.add(put);
  Delete delete = new Delete(ROW);
  delete.addColumns(FAMILY, QUALIFIERS[0]);
  mutations.add(delete);
  table.mutateRow(mutations);

  result = table.get(get);
  assertEquals(0, Bytes.compareTo(VALUE, result.getValue(FAMILY, QUALIFIERS[1])));
  assertNull(result.getValue(FAMILY, QUALIFIERS[0]));

  // Round 3: a put against an unknown family must be rejected.
  try {
    mutations = new RowMutations(ROW);
    put = new Put(ROW);
    put.addColumn(new byte[] { 'b', 'o', 'g', 'u', 's' }, QUALIFIERS[0], VALUE);
    mutations.add(put);
    table.mutateRow(mutations);
    fail("Expected NoSuchColumnFamilyException");
  } catch (NoSuchColumnFamilyException e) {
    // expected: family "bogus" is not part of the table schema
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * HBASE-867
 * If millions of columns in a column family, hbase scanner won't come up
 * Test will create numRows rows, each with numColsPerRow columns
 * (1 version each), and attempt to scan them all.
 * To test at scale, up numColsPerRow to the millions
 * (have not gotten that to work running as junit though)
 * The whole verification pass (get last row, full-table scan with per-row key checks)
 * runs twice: once against the memstore, then again after TEST_UTIL.flush() so the
 * same data is re-read from store files.
 */ @Test public void testJiraTest867() throws Exception { int numRows=10; int numColsPerRow=2000; TableName TABLE=TableName.valueOf("testJiraTest867"); byte[][] ROWS=makeN(ROW,numRows); byte[][] QUALIFIERS=makeN(QUALIFIER,numColsPerRow); Table ht=TEST_UTIL.createTable(TABLE,FAMILY); for (int i=0; i < numRows; i++) { Put put=new Put(ROWS[i]); put.setDurability(Durability.SKIP_WAL); for (int j=0; j < numColsPerRow; j++) { put.addColumn(FAMILY,QUALIFIERS[j],QUALIFIERS[j]); } assertTrue("Put expected to contain " + numColsPerRow + " columns but "+ "only contains "+ put.size(),put.size() == numColsPerRow); ht.put(put); } Get get=new Get(ROWS[numRows - 1]); Result result=ht.get(get); assertNumKeys(result,numColsPerRow); Cell[] keys=result.rawCells(); for (int i=0; i < result.size(); i++) { assertKey(keys[i],ROWS[numRows - 1],FAMILY,QUALIFIERS[i],QUALIFIERS[i]); } Scan scan=new Scan(); ResultScanner scanner=ht.getScanner(scan); int rowCount=0; while ((result=scanner.next()) != null) { assertNumKeys(result,numColsPerRow); Cell[] kvs=result.rawCells(); for (int i=0; i < numColsPerRow; i++) { assertKey(kvs[i],ROWS[rowCount],FAMILY,QUALIFIERS[i],QUALIFIERS[i]); } rowCount++; } scanner.close(); assertTrue("Expected to scan " + numRows + " rows but actually scanned "+ rowCount+ " rows",rowCount == numRows); TEST_UTIL.flush(); get=new Get(ROWS[numRows - 1]); result=ht.get(get); assertNumKeys(result,numColsPerRow); keys=result.rawCells(); for (int i=0; i < result.size(); i++) { assertKey(keys[i],ROWS[numRows - 1],FAMILY,QUALIFIERS[i],QUALIFIERS[i]); } scan=new Scan(); scanner=ht.getScanner(scan); rowCount=0; while ((result=scanner.next()) != null) { 
assertNumKeys(result,numColsPerRow); Cell[] kvs=result.rawCells(); for (int i=0; i < numColsPerRow; i++) { assertKey(kvs[i],ROWS[rowCount],FAMILY,QUALIFIERS[i],QUALIFIERS[i]); } rowCount++; } scanner.close(); assertTrue("Expected to scan " + numRows + " rows but actually scanned "+ rowCount+ " rows",rowCount == numRows); }

InternalCallVerifier EqualityVerifier 
/**
 * Scan counterpart of testGet_NullQualifier: addColumn(FAMILY, null) must match only the
 * cell stored under the null qualifier (1 cell), while addFamily returns both cells.
 * <p>
 * Fix: the original reassigned the first ResultScanner without closing it, never closed
 * the second one, and never closed the table. All three are closed now.
 */
@Test
public void testScan_NullQualifier() throws IOException {
  Table table = TEST_UTIL.createTable(TableName.valueOf("testScan_NullQualifier"), FAMILY);
  try {
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
    // Second cell in the same row under the null qualifier.
    put = new Put(ROW);
    put.addColumn(FAMILY, null, VALUE);
    table.put(put);
    LOG.info("Row put");
    Scan scan = new Scan();
    scan.addColumn(FAMILY, null);
    ResultScanner scanner = table.getScanner(scan);
    Result[] bar = scanner.next(100);
    scanner.close();
    assertEquals(1, bar.length);
    assertEquals(1, bar[0].size());
    scan = new Scan();
    scan.addFamily(FAMILY);
    scanner = table.getScanner(scan);
    bar = scanner.next(100);
    scanner.close();
    assertEquals(1, bar.length);
    assertEquals(2, bar[0].size());
  } finally {
    table.close();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Overwriting existing versions in place: writes versions at ts 1, 2, 3, then re-puts
 * ts 1 and 2 with new values and asserts the read returns the updated values for those
 * exact timestamps.
 * <p>
 * Fix: parameterized the raw {@code NavigableMap} — with the raw type,
 * {@code navigableMap.get(1L)} yields {@code Object}, which does not type-check against
 * {@code Bytes.toString(byte[])}. Also closes the table when done.
 */
@Test
public void testUpdates() throws Exception {
  TableName TABLE = TableName.valueOf("testUpdates");
  Table hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
  try {
    byte[] row = Bytes.toBytes("row1");
    byte[] qualifier = Bytes.toBytes("myCol");
    // Initial versions at explicit timestamps 1, 2, 3.
    Put put = new Put(row);
    put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("AAA"));
    hTable.put(put);
    put = new Put(row);
    put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("BBB"));
    hTable.put(put);
    put = new Put(row);
    put.addColumn(FAMILY, qualifier, 3L, Bytes.toBytes("EEE"));
    hTable.put(put);
    Get get = new Get(row);
    get.addColumn(FAMILY, qualifier);
    get.setMaxVersions();
    Result result = hTable.get(get);
    NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(qualifier);
    assertEquals("AAA", Bytes.toString(navigableMap.get(1L)));
    assertEquals("BBB", Bytes.toString(navigableMap.get(2L)));
    // Overwrite the same timestamps with new values.
    put = new Put(row);
    put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("CCC"));
    hTable.put(put);
    put = new Put(row);
    put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("DDD"));
    hTable.put(put);
    result = hTable.get(get);
    navigableMap = result.getMap().get(FAMILY).get(qualifier);
    assertEquals("CCC", Bytes.toString(navigableMap.get(1L)));
    assertEquals("DDD", Bytes.toString(navigableMap.get(2L)));
  } finally {
    hTable.close();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test filters when multiple regions. It does counts. Needs eye-balling of
 * logs to ensure that we're not scanning more regions that we're supposed to.
 * Related to the TestFilterAcrossRegions over in the o.a.h.h.filter package.
 * After splitting the loaded table, counts rows via a LESS row-filter pinned at the
 * first region's end key (and at end key +/- 1, +2), then EQUAL and GREATER_OR_EQUAL
 * variants, checking each count against the full row count / end-key count arithmetic.
 * NOTE(review): `regions` is a raw List here; elements appear to carry getRegionInfo()
 * (presumably HRegionLocation) — confirm against splitTable's declaration.
 * @throws IOException
 * @throws InterruptedException
 */ @Test public void testFilterAcrossMultipleRegions() throws IOException, InterruptedException { TableName name=TableName.valueOf("testFilterAcrossMutlipleRegions"); Table t=TEST_UTIL.createTable(name,FAMILY); int rowCount=TEST_UTIL.loadTable(t,FAMILY,false); assertRowCount(t,rowCount); List regions=splitTable(t); assertRowCount(t,rowCount); byte[] endKey=regions.get(0).getRegionInfo().getEndKey(); int endKeyCount=countRows(t,createScanWithRowFilter(endKey)); assertTrue(endKeyCount < rowCount); byte[] key=new byte[]{endKey[0],endKey[1],(byte)(endKey[2] + 1)}; int plusOneCount=countRows(t,createScanWithRowFilter(key)); assertEquals(endKeyCount + 1,plusOneCount); key=new byte[]{endKey[0],endKey[1],(byte)(endKey[2] + 2)}; int plusTwoCount=countRows(t,createScanWithRowFilter(key)); assertEquals(endKeyCount + 2,plusTwoCount); key=new byte[]{endKey[0],endKey[1],(byte)(endKey[2] - 1)}; int minusOneCount=countRows(t,createScanWithRowFilter(key)); assertEquals(endKeyCount - 1,minusOneCount); key=new byte[]{'a','a','a'}; int countBBB=countRows(t,createScanWithRowFilter(key,null,CompareFilter.CompareOp.EQUAL)); assertEquals(1,countBBB); int countGreater=countRows(t,createScanWithRowFilter(endKey,null,CompareFilter.CompareOp.GREATER_OR_EQUAL)); assertEquals(0,countGreater); countGreater=countRows(t,createScanWithRowFilter(endKey,endKey,CompareFilter.CompareOp.GREATER_OR_EQUAL)); assertEquals(rowCount - endKeyCount,countGreater); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Reversed scan with a QualifierFilter (regex "col[1-5]"): only the five rows
 * whose single qualifier matches are returned, in descending row order.
 */
@Test
public void testFiltersWithReverseScan() throws Exception {
  TableName TABLE = TableName.valueOf("testFiltersWithReverseScan");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte[][] ROWS = makeN(ROW, 10);
  byte[][] QUALIFIERS = { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"),
      Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), Bytes.toBytes("col4--"),
      Bytes.toBytes("col5--"), Bytes.toBytes("col6--"), Bytes.toBytes("col7--"),
      Bytes.toBytes("col8--"), Bytes.toBytes("col9--") };
  // One cell per row; row i carries qualifier "col<i>--".
  for (int rowIdx = 0; rowIdx < 10; rowIdx++) {
    Put cellPut = new Put(ROWS[rowIdx]);
    cellPut.addColumn(FAMILY, QUALIFIERS[rowIdx], VALUE);
    ht.put(cellPut);
  }
  Scan reversedScan = new Scan();
  reversedScan.setReversed(true);
  reversedScan.addFamily(FAMILY);
  Filter qualifierFilter =
      new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator("col[1-5]"));
  reversedScan.setFilter(qualifierFilter);
  ResultScanner scanner = ht.getScanner(reversedScan);
  // Reversed order: expect rows 5, 4, 3, 2, 1.
  int expectedIndex = 5;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    Cell c = result.rawCells()[0];
    assertTrue(Bytes.equals(c.getRowArray(), c.getRowOffset(), c.getRowLength(),
        ROWS[expectedIndex], 0, ROWS[expectedIndex].length));
    assertTrue(Bytes.equals(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength(),
        QUALIFIERS[expectedIndex], 0, QUALIFIERS[expectedIndex].length));
    expectedIndex--;
  }
  assertEquals(expectedIndex, 0);
  scanner.close();
  ht.close();
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier IgnoredMethod 
// Ignored as flakey (HBASE-8989). Exercises the "thread-local" client IPC
// pool: writes/reads versions on the main thread first, then repeats the same
// put/get/verify cycle concurrently from a fixed thread pool, collecting any
// AssertionError into `error` for the final assertNull check.
// NOTE(review): the empty `catch (Exception e)` swallows non-assertion worker
// failures, and `waitLock.wait()` relies on shutdownNow()'s interrupt to
// unblock workers that missed the notifyAll — both appear deliberate for this
// flakey test, but confirm before reusing the pattern.
@Ignore("Flakey: HBASE-8989") @Test public void testClientPoolThreadLocal() throws IOException { final TableName tableName=TableName.valueOf("testClientPoolThreadLocal"); int poolSize=Integer.MAX_VALUE; int numVersions=3; Configuration conf=TEST_UTIL.getConfiguration(); conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE,"thread-local"); conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE,poolSize); final Table table=TEST_UTIL.createTable(tableName,new byte[][]{FAMILY},3); final long ts=EnvironmentEdgeManager.currentTime(); final Get get=new Get(ROW); get.addColumn(FAMILY,QUALIFIER); get.setMaxVersions(); for (int versions=1; versions <= numVersions; versions++) { Put put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER,ts + versions,VALUE); table.put(put); Result result=table.get(get); NavigableMap navigableMap=result.getMap().get(FAMILY).get(QUALIFIER); assertEquals("The number of versions of '" + FAMILY + ":"+ QUALIFIER+ " did not match "+ versions+ "; "+ put.toString()+ ", "+ get.toString(),versions,navigableMap.size()); for ( Map.Entry entry : navigableMap.entrySet()) { assertTrue("The value at time " + entry.getKey() + " did not match what was put",Bytes.equals(VALUE,entry.getValue())); } } final Object waitLock=new Object(); ExecutorService executorService=Executors.newFixedThreadPool(numVersions); final AtomicReference error=new AtomicReference(null); for (int versions=numVersions; versions < numVersions * 2; versions++) { final int versionsCopy=versions; executorService.submit(new Callable(){ @Override public Void call(){ try { Put put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER,ts + versionsCopy,VALUE); table.put(put); Result result=table.get(get); NavigableMap navigableMap=result.getMap().get(FAMILY).get(QUALIFIER); assertEquals("The number of versions of '" + Bytes.toString(FAMILY) + ":"+ Bytes.toString(QUALIFIER)+ " did not match "+ versionsCopy,versionsCopy,navigableMap.size()); for ( Map.Entry entry : navigableMap.entrySet()) { assertTrue("The value at time " 
+ entry.getKey() + " did not match what was put",Bytes.equals(VALUE,entry.getValue())); } synchronized (waitLock) { waitLock.wait(); } } catch ( Exception e) { } catch ( AssertionError e) { error.set(e); LOG.error(e); } return null; } } ); } synchronized (waitLock) { waitLock.notifyAll(); } executorService.shutdownNow(); assertNull(error.get()); }

InternalCallVerifier EqualityVerifier 
/**
 * KeyOnlyFilter(true) strips every cell's value and replaces it with the
 * original value length encoded as a 4-byte int; verify each returned cell.
 */
@Test
public void testKeyOnlyFilter() throws Exception {
  TableName TABLE = TableName.valueOf("testKeyOnlyFilter");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte[][] ROWS = makeN(ROW, 10);
  byte[][] QUALIFIERS = { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"),
      Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), Bytes.toBytes("col4--"),
      Bytes.toBytes("col5--"), Bytes.toBytes("col6--"), Bytes.toBytes("col7--"),
      Bytes.toBytes("col8--"), Bytes.toBytes("col9--") };
  for (int i = 0; i < 10; i++) {
    Put put = new Put(ROWS[i]);
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY, QUALIFIERS[i], VALUE);
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  Filter filter = new KeyOnlyFilter(true);
  scan.setFilter(filter);
  // Iterate the scanner we opened. The original opened a SECOND scanner in
  // the for-each header and never closed it, leaking a scanner.
  ResultScanner scanner = ht.getScanner(scan);
  int count = 0;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    // The value is replaced by its length, stored as SIZEOF_INT bytes.
    assertEquals(result.rawCells()[0].getValueLength(), Bytes.SIZEOF_INT);
    assertEquals(Bytes.toInt(CellUtil.cloneValue(result.rawCells()[0])), VALUE.length);
    count++;
  }
  assertEquals(count, 10);
  scanner.close();
}

APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Creates a 26-region table and checks getRegionsInRange(...) region counts
 * for a series of [start, end) key combinations, including the empty
 * start/end row sentinels and a degenerate start == end range.
 */
@Test
public void testGetRegionsInRange() throws Exception {
  byte[] startKey = Bytes.toBytes("ddc");
  byte[] endKey = Bytes.toBytes("mmm");
  TableName TABLE = TableName.valueOf("testGetRegionsInRange");
  Table t = TEST_UTIL.createMultiRegionTable(TABLE, new byte[][] { FAMILY }, 10);
  if (t instanceof HTable) {
    HTable table = (HTable) t;
    // The multi-region helper should have produced 26 regions.
    int numOfRegions = -1;
    try (RegionLocator r = table.getRegionLocator()) {
      numOfRegions = r.getStartKeys().length;
    }
    assertEquals(26, numOfRegions);

    // [ddc, mmm) spans 10 regions.
    List located = getRegionsInRange(TABLE, startKey, endKey);
    assertEquals(10, located.size());
    // Move the start forward, then the end forward.
    startKey = Bytes.toBytes("fff");
    located = getRegionsInRange(TABLE, startKey, endKey);
    assertEquals(7, located.size());
    endKey = Bytes.toBytes("nnn");
    located = getRegionsInRange(TABLE, startKey, endKey);
    assertEquals(8, located.size());
    // Open-ended ranges via the empty-row sentinels.
    located = getRegionsInRange(TABLE, HConstants.EMPTY_START_ROW, endKey);
    assertEquals(13, located.size());
    located = getRegionsInRange(TABLE, startKey, HConstants.EMPTY_END_ROW);
    assertEquals(21, located.size());
    located = getRegionsInRange(TABLE, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    assertEquals(26, located.size());
    // An end key past the last region behaves like an open end.
    endKey = Bytes.toBytes("zzz1");
    located = getRegionsInRange(TABLE, startKey, endKey);
    assertEquals(21, located.size());
    // A start key before the first region behaves like an open start.
    startKey = Bytes.toBytes("aac");
    located = getRegionsInRange(TABLE, startKey, endKey);
    assertEquals(26, located.size());
    // Degenerate range: start == end resolves to the single containing region.
    endKey = Bytes.toBytes("ccc");
    startKey = endKey;
    located = getRegionsInRange(TABLE, startKey, endKey);
    assertEquals(1, located.size());
  }
}

InternalCallVerifier BooleanVerifier 
/**
 * A Get for a row that exists returns a non-empty Result; a Get for a row
 * that was never written returns an empty one.
 */
@Test
public void testGet_NonExistentRow() throws IOException {
  Table table = TEST_UTIL.createTable(TableName.valueOf("testGet_NonExistentRow"), FAMILY);
  Put put = new Put(ROW);
  put.addColumn(FAMILY, QUALIFIER, VALUE);
  table.put(put);
  LOG.info("Row put");
  Get get = new Get(ROW);
  get.addFamily(FAMILY);
  Result r = table.get(get);
  assertFalse(r.isEmpty());
  // Use the class logger for consistency: the original mixed
  // System.out.println with LOG.info within this same method.
  LOG.info("Row retrieved successfully");
  // Now probe a row that was never written.
  byte[] missingrow = Bytes.toBytes("missingrow");
  get = new Get(missingrow);
  get.addFamily(FAMILY);
  r = table.get(get);
  assertTrue(r.isEmpty());
  LOG.info("Row missing as it should be");
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Same overwrite-at-existing-timestamp scenario as testUpdates, but with a
 * flush plus major compaction between the writes and the re-reads, verifying
 * the re-put values survive compaction.
 */
@Test
public void testUpdatesWithMajorCompaction() throws Exception {
  TableName TABLE = TableName.valueOf("testUpdatesWithMajorCompaction");
  Table hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
  Admin admin = TEST_UTIL.getHBaseAdmin();
  byte[] row = Bytes.toBytes("row2");
  byte[] qualifier = Bytes.toBytes("myCol");

  // Initial versions at explicit timestamps 1, 2 and 3.
  Put put = new Put(row);
  put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("AAA"));
  hTable.put(put);

  put = new Put(row);
  put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("BBB"));
  hTable.put(put);

  put = new Put(row);
  put.addColumn(FAMILY, qualifier, 3L, Bytes.toBytes("EEE"));
  hTable.put(put);

  Get get = new Get(row);
  get.addColumn(FAMILY, qualifier);
  get.setMaxVersions();

  // Sanity check the original values at timestamps 1 and 2.
  // Parameterized map type (timestamp -> value); the raw NavigableMap in the
  // original would not compile against Bytes.toString(byte[]).
  Result result = hTable.get(get);
  NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(qualifier);
  assertEquals("AAA", Bytes.toString(navigableMap.get(1L)));
  assertEquals("BBB", Bytes.toString(navigableMap.get(2L)));

  // Flush and major-compact, then sleep to let the async compaction finish.
  admin.flush(TABLE);
  admin.majorCompact(TABLE);
  Thread.sleep(6000);

  // Overwrite the cells at the same timestamps.
  put = new Put(row);
  put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("CCC"));
  hTable.put(put);

  put = new Put(row);
  put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("DDD"));
  hTable.put(put);

  admin.flush(TABLE);
  admin.majorCompact(TABLE);
  Thread.sleep(6000);

  // The re-put values must still win after compaction.
  result = hTable.get(get);
  navigableMap = result.getMap().get(FAMILY).get(qualifier);
  assertEquals("CCC", Bytes.toString(navigableMap.get(1L)));
  assertEquals("DDD", Bytes.toString(navigableMap.get(2L)));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests reversed scan under multi regions: splits a table into 7 regions,
 * writes two rows per split point, sanity-checks a forward scan, then runs
 * the reverse-scan checks with small-scan off and on.
 */
@Test
public void testSmallReversedScanUnderMultiRegions() throws Exception {
  TableName TABLE = TableName.valueOf("testSmallReversedScanUnderMultiRegions");
  byte[][] splitRows = new byte[][] { Bytes.toBytes("000"), Bytes.toBytes("002"),
      Bytes.toBytes("004"), Bytes.toBytes("006"), Bytes.toBytes("008"), Bytes.toBytes("010") };
  Table table = TEST_UTIL.createTable(TABLE, FAMILY, splitRows);
  TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
  try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE)) {
    // N split points produce N + 1 regions.
    assertEquals(splitRows.length + 1, l.getAllRegionLocations().size());
  }
  // Write two rows per region: the split row itself and its byte successor.
  for (byte[] splitRow : splitRows) {
    Put put = new Put(splitRow);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
    byte[] nextRow = Bytes.copy(splitRow);
    nextRow[nextRow.length - 1]++;
    put = new Put(nextRow);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
  }
  // Forward scan sanity check; close the scanner (the original leaked it).
  int count = 0;
  try (ResultScanner scanner = table.getScanner(new Scan())) {
    for (Result r : scanner) {
      assertFalse(r.isEmpty());
      count++;
    }
  }
  assertEquals(12, count);
  reverseScanTest(table, false);
  reverseScanTest(table, true);
  table.close();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Like testUpdatesWithMajorCompaction, but additionally flushes and
 * major-compacts BETWEEN the two overwriting puts, verifying each re-put
 * value survives its own compaction cycle.
 */
@Test
public void testMajorCompactionBetweenTwoUpdates() throws Exception {
  TableName tableName = TableName.valueOf("testMajorCompactionBetweenTwoUpdates");
  Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10);
  Admin admin = TEST_UTIL.getHBaseAdmin();
  byte[] row = Bytes.toBytes("row3");
  byte[] qualifier = Bytes.toBytes("myCol");

  // Initial versions at explicit timestamps 1, 2 and 3.
  Put put = new Put(row);
  put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("AAA"));
  hTable.put(put);

  put = new Put(row);
  put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("BBB"));
  hTable.put(put);

  put = new Put(row);
  put.addColumn(FAMILY, qualifier, 3L, Bytes.toBytes("EEE"));
  hTable.put(put);

  Get get = new Get(row);
  get.addColumn(FAMILY, qualifier);
  get.setMaxVersions();

  // Sanity check the original values at timestamps 1 and 2.
  // Parameterized map type (timestamp -> value); the raw NavigableMap in the
  // original would not compile against Bytes.toString(byte[]).
  Result result = hTable.get(get);
  NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(qualifier);
  assertEquals("AAA", Bytes.toString(navigableMap.get(1L)));
  assertEquals("BBB", Bytes.toString(navigableMap.get(2L)));

  // Compact, overwrite timestamp 1, compact again, overwrite timestamp 2,
  // compact once more. Sleeps allow the async compactions to finish.
  admin.flush(tableName);
  admin.majorCompact(tableName);
  Thread.sleep(6000);

  put = new Put(row);
  put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("CCC"));
  hTable.put(put);

  admin.flush(tableName);
  admin.majorCompact(tableName);
  Thread.sleep(6000);

  put = new Put(row);
  put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("DDD"));
  hTable.put(put);

  admin.flush(tableName);
  admin.majorCompact(tableName);
  Thread.sleep(6000);

  // Both re-put values must survive their compaction cycles.
  result = hTable.get(get);
  navigableMap = result.getMap().get(FAMILY).get(qualifier);
  assertEquals("CCC", Bytes.toString(navigableMap.get(1L)));
  assertEquals("DDD", Bytes.toString(navigableMap.get(2L)));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Verifies per-family max-versions limits. Three families are created with
// limits {1, 3, 5}; family 0 gets 2 versions, family 1 gets 4, family 2 gets
// 7. Reads via Get and Scan (per-column and per-family) must each return only
// the newest LIMIT versions per family, and whole-row reads must see
// 1 + 3 + 5 = 9 cells total.
@Test public void testVersionLimits() throws Exception { TableName TABLE=TableName.valueOf("testVersionLimits"); byte[][] FAMILIES=makeNAscii(FAMILY,3); int[] LIMITS={1,3,5}; long[] STAMPS=makeStamps(10); byte[][] VALUES=makeNAscii(VALUE,10); Table ht=TEST_UTIL.createTable(TABLE,FAMILIES,LIMITS); Put put=new Put(ROW); put.addColumn(FAMILIES[0],QUALIFIER,STAMPS[0],VALUES[0]); put.addColumn(FAMILIES[0],QUALIFIER,STAMPS[1],VALUES[1]); put.addColumn(FAMILIES[1],QUALIFIER,STAMPS[0],VALUES[0]); put.addColumn(FAMILIES[1],QUALIFIER,STAMPS[1],VALUES[1]); put.addColumn(FAMILIES[1],QUALIFIER,STAMPS[2],VALUES[2]); put.addColumn(FAMILIES[1],QUALIFIER,STAMPS[3],VALUES[3]); put.addColumn(FAMILIES[2],QUALIFIER,STAMPS[0],VALUES[0]); put.addColumn(FAMILIES[2],QUALIFIER,STAMPS[1],VALUES[1]); put.addColumn(FAMILIES[2],QUALIFIER,STAMPS[2],VALUES[2]); put.addColumn(FAMILIES[2],QUALIFIER,STAMPS[3],VALUES[3]); put.addColumn(FAMILIES[2],QUALIFIER,STAMPS[4],VALUES[4]); put.addColumn(FAMILIES[2],QUALIFIER,STAMPS[5],VALUES[5]); put.addColumn(FAMILIES[2],QUALIFIER,STAMPS[6],VALUES[6]); ht.put(put); Get get=new Get(ROW); get.addColumn(FAMILIES[0],QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); Result result=ht.get(get); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{STAMPS[1]},new byte[][]{VALUES[1]},0,0); get=new Get(ROW); get.addFamily(FAMILIES[0]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{STAMPS[1]},new byte[][]{VALUES[1]},0,0); Scan scan=new Scan(ROW); scan.addColumn(FAMILIES[0],QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{STAMPS[1]},new byte[][]{VALUES[1]},0,0); scan=new Scan(ROW); scan.addFamily(FAMILIES[0]); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{STAMPS[1]},new byte[][]{VALUES[1]},0,0); get=new Get(ROW); 
// Family 1 (limit 3): only the newest three of its four versions survive.
get.addColumn(FAMILIES[1],QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILIES[1],QUALIFIER,new long[]{STAMPS[1],STAMPS[2],STAMPS[3]},new byte[][]{VALUES[1],VALUES[2],VALUES[3]},0,2); get=new Get(ROW); get.addFamily(FAMILIES[1]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILIES[1],QUALIFIER,new long[]{STAMPS[1],STAMPS[2],STAMPS[3]},new byte[][]{VALUES[1],VALUES[2],VALUES[3]},0,2); scan=new Scan(ROW); scan.addColumn(FAMILIES[1],QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[1],QUALIFIER,new long[]{STAMPS[1],STAMPS[2],STAMPS[3]},new byte[][]{VALUES[1],VALUES[2],VALUES[3]},0,2); scan=new Scan(ROW); scan.addFamily(FAMILIES[1]); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[1],QUALIFIER,new long[]{STAMPS[1],STAMPS[2],STAMPS[3]},new byte[][]{VALUES[1],VALUES[2],VALUES[3]},0,2); get=new Get(ROW); get.addColumn(FAMILIES[2],QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILIES[2],QUALIFIER,new long[]{STAMPS[2],STAMPS[3],STAMPS[4],STAMPS[5],STAMPS[6]},new byte[][]{VALUES[2],VALUES[3],VALUES[4],VALUES[5],VALUES[6]},0,4); get=new Get(ROW); get.addFamily(FAMILIES[2]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILIES[2],QUALIFIER,new long[]{STAMPS[2],STAMPS[3],STAMPS[4],STAMPS[5],STAMPS[6]},new byte[][]{VALUES[2],VALUES[3],VALUES[4],VALUES[5],VALUES[6]},0,4); scan=new Scan(ROW); scan.addColumn(FAMILIES[2],QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[2],QUALIFIER,new long[]{STAMPS[2],STAMPS[3],STAMPS[4],STAMPS[5],STAMPS[6]},new byte[][]{VALUES[2],VALUES[3],VALUES[4],VALUES[5],VALUES[6]},0,4); scan=new Scan(ROW); scan.addFamily(FAMILIES[2]); 
// Whole-row reads (all families, by family, by column; Get and Scan) must
// total 1 + 3 + 5 = 9 retained cells.
scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[2],QUALIFIER,new long[]{STAMPS[2],STAMPS[3],STAMPS[4],STAMPS[5],STAMPS[6]},new byte[][]{VALUES[2],VALUES[3],VALUES[4],VALUES[5],VALUES[6]},0,4); get=new Get(ROW); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertTrue("Expected 9 keys but received " + result.size(),result.size() == 9); get=new Get(ROW); get.addFamily(FAMILIES[0]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertTrue("Expected 9 keys but received " + result.size(),result.size() == 9); get=new Get(ROW); get.addColumn(FAMILIES[0],QUALIFIER); get.addColumn(FAMILIES[1],QUALIFIER); get.addColumn(FAMILIES[2],QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertTrue("Expected 9 keys but received " + result.size(),result.size() == 9); scan=new Scan(ROW); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertTrue("Expected 9 keys but received " + result.size(),result.size() == 9); scan=new Scan(ROW); scan.setMaxVersions(Integer.MAX_VALUE); scan.addFamily(FAMILIES[0]); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); result=getSingleScanResult(ht,scan); assertTrue("Expected 9 keys but received " + result.size(),result.size() == 9); scan=new Scan(ROW); scan.setMaxVersions(Integer.MAX_VALUE); scan.addColumn(FAMILIES[0],QUALIFIER); scan.addColumn(FAMILIES[1],QUALIFIER); scan.addColumn(FAMILIES[2],QUALIFIER); result=getSingleScanResult(ht,scan); assertTrue("Expected 9 keys but received " + result.size(),result.size() == 9); }

InternalCallVerifier BooleanVerifier 
/**
 * A Get against a freshly created, never-written table must come back empty.
 */
@Test
public void testGet_EmptyTable() throws IOException {
  final Table emptyTable =
      TEST_UTIL.createTable(TableName.valueOf("testGet_EmptyTable"), FAMILY);
  final Get probe = new Get(ROW);
  probe.addFamily(FAMILY);
  final Result fetched = emptyTable.get(probe);
  assertTrue(fetched.isEmpty());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * SingleColumnValueFilter with a LongComparator: rows 0-9 store long values
 * 0, 100, ..., 900; GREATER 500 must return exactly the four rows holding
 * 600-900.
 */
@Test
public void testFilterWithLongCompartor() throws Exception {
  TableName TABLE = TableName.valueOf("testFilterWithLongCompartor");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte[][] ROWS = makeN(ROW, 10);
  byte[][] values = new byte[10][];
  for (int i = 0; i < 10; i++) {
    values[i] = Bytes.toBytes(100L * i);
  }
  for (int i = 0; i < 10; i++) {
    Put put = new Put(ROWS[i]);
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY, QUALIFIER, values[i]);
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  Filter filter =
      new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOp.GREATER, new LongComparator(500));
  scan.setFilter(filter);
  // Iterate the scanner we opened. The original opened a SECOND scanner in
  // the for-each header and never closed it, leaking a scanner.
  ResultScanner scanner = ht.getScanner(scan);
  int expectedIndex = 0;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    assertTrue(Bytes.toLong(result.getValue(FAMILY, QUALIFIER)) > 500);
    expectedIndex++;
  }
  assertEquals(expectedIndex, 4);
  scanner.close();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test from client side of an involved filter against a multi family table
 * that involves deletes: writes rows under three random key prefixes with
 * two distinct values, verifies the per-prefix scan counts for the second
 * value, deletes those columns, then verifies the same scans now return
 * zero rows.
 * @throws Exception
 */
@Test public void testWeirdCacheBehaviour() throws Exception { TableName TABLE=TableName.valueOf("testWeirdCacheBehaviour"); byte[][] FAMILIES=new byte[][]{Bytes.toBytes("trans-blob"),Bytes.toBytes("trans-type"),Bytes.toBytes("trans-date"),Bytes.toBytes("trans-tags"),Bytes.toBytes("trans-group")}; Table ht=TEST_UTIL.createTable(TABLE,FAMILIES); String value="this is the value"; String value2="this is some other value"; String keyPrefix1=UUID.randomUUID().toString(); String keyPrefix2=UUID.randomUUID().toString(); String keyPrefix3=UUID.randomUUID().toString(); putRows(ht,3,value,keyPrefix1); putRows(ht,3,value,keyPrefix2); putRows(ht,3,value,keyPrefix3); putRows(ht,3,value2,keyPrefix1); putRows(ht,3,value2,keyPrefix2); putRows(ht,3,value2,keyPrefix3); Table table=TEST_UTIL.getConnection().getTable(TABLE); System.out.println("Checking values for key: " + keyPrefix1); assertEquals("Got back incorrect number of rows from scan",3,getNumberOfRows(keyPrefix1,value2,table)); System.out.println("Checking values for key: " + keyPrefix2); assertEquals("Got back incorrect number of rows from scan",3,getNumberOfRows(keyPrefix2,value2,table)); System.out.println("Checking values for key: " + keyPrefix3); assertEquals("Got back incorrect number of rows from scan",3,getNumberOfRows(keyPrefix3,value2,table)); deleteColumns(ht,value2,keyPrefix1); deleteColumns(ht,value2,keyPrefix2); deleteColumns(ht,value2,keyPrefix3); System.out.println("Starting important checks....."); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix1,0,getNumberOfRows(keyPrefix1,value2,table)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix2,0,getNumberOfRows(keyPrefix2,value2,table)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix3,0,getNumberOfRows(keyPrefix3,value2,table)); }

InternalCallVerifier EqualityVerifier 
// Exhaustively exercises checkAndDelete with every CompareOp. The stored
// cell is cycled between value2 ("bbbb") and value3 ("cccc"); each condition
// is checked against a smaller (value1), larger (value4) and equal comparison
// value, asserting exactly which comparisons permit the delete. After every
// successful delete the cell is re-put so the next comparison starts from a
// known state. NOTE: checkAndDelete compares the COMPARISON value against the
// stored value, so e.g. LESS value1 succeeds while GREATER value1 fails when
// "bbbb" is stored.
@Test public void testCheckAndDeleteWithCompareOp() throws IOException { final byte[] value1=Bytes.toBytes("aaaa"); final byte[] value2=Bytes.toBytes("bbbb"); final byte[] value3=Bytes.toBytes("cccc"); final byte[] value4=Bytes.toBytes("dddd"); Table table=TEST_UTIL.createTable(TableName.valueOf("testCheckAndDeleteWithCompareOp"),FAMILY); Put put2=new Put(ROW); put2.addColumn(FAMILY,QUALIFIER,value2); table.put(put2); Put put3=new Put(ROW); put3.addColumn(FAMILY,QUALIFIER,value3); Delete delete=new Delete(ROW); delete.addColumns(FAMILY,QUALIFIER); boolean ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.GREATER,value1,delete); assertEquals(ok,false); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.EQUAL,value1,delete); assertEquals(ok,false); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.GREATER_OR_EQUAL,value1,delete); assertEquals(ok,false); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.LESS,value1,delete); assertEquals(ok,true); table.put(put2); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.LESS_OR_EQUAL,value1,delete); assertEquals(ok,true); table.put(put2); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.NOT_EQUAL,value1,delete); assertEquals(ok,true); table.put(put3); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.LESS,value4,delete); assertEquals(ok,false); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.LESS_OR_EQUAL,value4,delete); assertEquals(ok,false); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.EQUAL,value4,delete); assertEquals(ok,false); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.GREATER,value4,delete); assertEquals(ok,true); table.put(put3); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.GREATER_OR_EQUAL,value4,delete); assertEquals(ok,true); table.put(put3); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.NOT_EQUAL,value4,delete); assertEquals(ok,true); table.put(put2); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.GREATER,value2,delete); 
assertEquals(ok,false); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.NOT_EQUAL,value2,delete); assertEquals(ok,false); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.LESS,value2,delete); assertEquals(ok,false); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.GREATER_OR_EQUAL,value2,delete); assertEquals(ok,true); table.put(put2); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.LESS_OR_EQUAL,value2,delete); assertEquals(ok,true); table.put(put2); ok=table.checkAndDelete(ROW,FAMILY,QUALIFIER,CompareOp.EQUAL,value2,delete); assertEquals(ok,true); }

InternalCallVerifier EqualityVerifier 
/**
 * Atomically applies two Puts to different rows through the MultiRowMutation
 * coprocessor endpoint, then reads both rows back and checks their values.
 */
@Test
public void testMultiRowMutation() throws Exception {
  LOG.info("Starting testMultiRowMutation");
  final TableName TABLENAME = TableName.valueOf("testMultiRowMutation");
  final byte[] ROW1 = Bytes.toBytes("testRow1");
  Table t = TEST_UTIL.createTable(TABLENAME, FAMILY);
  // Encode the two puts as protobuf mutations.
  Put firstPut = new Put(ROW);
  firstPut.addColumn(FAMILY, QUALIFIER, VALUE);
  MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, firstPut);
  Put secondPut = new Put(ROW1);
  secondPut.addColumn(FAMILY, QUALIFIER, VALUE);
  MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, secondPut);
  MutateRowsRequest.Builder requestBuilder = MutateRowsRequest.newBuilder();
  requestBuilder.addMutationRequest(m1);
  requestBuilder.addMutationRequest(m2);
  MutateRowsRequest request = requestBuilder.build();
  // Invoke the endpoint on the region hosting ROW.
  CoprocessorRpcChannel channel = t.coprocessorService(ROW);
  MultiRowMutationService.BlockingInterface service =
      MultiRowMutationService.newBlockingStub(channel);
  service.mutateRows(null, request);
  // Both rows must now hold VALUE.
  Result firstRead = t.get(new Get(ROW));
  assertEquals(0, Bytes.compareTo(VALUE, firstRead.getValue(FAMILY, QUALIFIER)));
  Result secondRead = t.get(new Get(ROW1));
  assertEquals(0, Bytes.compareTo(VALUE, secondRead.getValue(FAMILY, QUALIFIER)));
}

InternalCallVerifier BooleanVerifier 
/**
 * Simple test that just executes parts of the client API that accept a
 * pre-created HConnection instance.
 * @throws IOException
 */
@Test
public void testUnmanagedHConnection() throws IOException {
  final TableName tableName = TableName.valueOf("testUnmanagedHConnection");
  TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
  // Close all client resources when done; the original leaked the
  // Connection and Table (only Admin was closed).
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
      Table t = conn.getTable(tableName);
      Admin admin = conn.getAdmin()) {
    assertTrue(admin.tableExists(tableName));
    assertTrue(t.get(new Get(ROW)).isEmpty());
  }
}

InternalCallVerifier NullVerifier 
/**
 * A FilterList combining FirstKeyOnlyFilter with an InclusiveStopFilter on a
 * zero-length row stops the scan immediately, so even a populated system
 * table yields no results.
 */
@Test
public void testFilterAllRecords() throws IOException {
  Scan stopImmediatelyScan = new Scan();
  stopImmediatelyScan.setBatch(1);
  stopImmediatelyScan.setCaching(1);
  // A stop row of length zero terminates the scan before any row is emitted.
  stopImmediatelyScan.setFilter(
      new FilterList(new FirstKeyOnlyFilter(), new InclusiveStopFilter(new byte[0])));
  try (Table nsTable = TEST_UTIL.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
      ResultScanner results = nsTable.getScanner(stopImmediatelyScan)) {
    assertNull(results.next());
  }
}

InternalCallVerifier IdentityVerifier 
/**
 * Verifies that getConfiguration returns the same Configuration object used
 * to create the HTable instance.
 */
@Test
public void testGetConfiguration() throws Exception {
  TableName TABLE = TableName.valueOf("testGetConfiguration");
  byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") };
  Configuration expectedConf = TEST_UTIL.getConfiguration();
  Table createdTable = TEST_UTIL.createTable(TABLE, FAMILIES);
  // Identity (not equality) is the contract under test.
  assertSame(expectedConf, createdTable.getConfiguration());
}

InternalCallVerifier BooleanVerifier 
/**
 * Test of that unmanaged HConnections are able to reconnect properly after
 * the active master is stopped and a replacement started (see HBASE-5058).
 * @throws Exception
 */
@Test
public void testUnmanagedHConnectionReconnect() throws Exception {
  final TableName tableName = TableName.valueOf("testUnmanagedHConnectionReconnect");
  TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
  // Close the connection and table when done (the original leaked both).
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
      Table t = conn.getTable(tableName)) {
    try (Admin admin = conn.getAdmin()) {
      assertTrue(admin.tableExists(tableName));
      assertTrue(t.get(new Get(ROW)).isEmpty());
    }
    // Bounce the master: stop the active one, wait for it to go down, then
    // start a replacement and wait until it is active and ready.
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    cluster.stopMaster(0, false);
    cluster.waitOnMaster(0);
    cluster.startMaster();
    assertTrue(cluster.waitForActiveAndReadyMaster());
    // The pre-existing unmanaged connection must still work against the
    // new master.
    try (Admin admin = conn.getAdmin()) {
      assertTrue(admin.tableExists(tableName));
      assertTrue(admin.getClusterStatus().getServersSize() == SLAVES + 1);
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * After moving a region, a cached location lookup still reports the old
 * server while a cache-bypassing lookup reports the new one.
 */
@Test
public void testNonCachedGetRegionLocation() throws Exception {
  TableName TABLE = TableName.valueOf("testNonCachedGetRegionLocation");
  byte[] family1 = Bytes.toBytes("f1");
  byte[] family2 = Bytes.toBytes("f2");
  try (Table table = TEST_UTIL.createTable(TABLE, new byte[][] { family1, family2 }, 10);
      Admin admin = TEST_UTIL.getHBaseAdmin();
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TABLE)) {
    // Parameterized list type; the raw List in the original would not
    // compile against get(0).getRegionInfo().
    List<HRegionLocation> allRegionLocations = locator.getAllRegionLocations();
    assertEquals(1, allRegionLocations.size());
    HRegionInfo regionInfo = allRegionLocations.get(0).getRegionInfo();
    ServerName addrBefore = allRegionLocations.get(0).getServerName();
    // Cached and reloaded lookups should agree before the move.
    HRegionLocation addrCache = locator.getRegionLocation(regionInfo.getStartKey(), false);
    HRegionLocation addrNoCache = locator.getRegionLocation(regionInfo.getStartKey(), true);
    assertEquals(addrBefore.getPort(), addrCache.getPort());
    assertEquals(addrBefore.getPort(), addrNoCache.getPort());
    // Move the region to the first region server that isn't its current host.
    ServerName addrAfter = null;
    for (int i = 0; i < SLAVES; i++) {
      HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      ServerName addr = regionServer.getServerName();
      if (addr.getPort() != addrBefore.getPort()) {
        admin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(addr.toString()));
        // Give the move time to complete.
        Thread.sleep(5000);
        addrAfter = addr;
        break;
      }
    }
    // The cached lookup must now be stale; the reloading lookup must see the
    // new server.
    addrCache = locator.getRegionLocation(regionInfo.getStartKey(), false);
    addrNoCache = locator.getRegionLocation(regionInfo.getStartKey(), true);
    assertNotNull(addrAfter);
    assertTrue(addrAfter.getPort() != addrCache.getPort());
    assertEquals(addrAfter.getPort(), addrNoCache.getPort());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Basic Put coverage: writes cells with a null qualifier, inspects the
 * client-side Put structure (family map size, family/qualifier/value bytes)
 * before sending, then scans the written column and dumps the cells.
 */
@Test
public void testPut() throws IOException {
  final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents");
  final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam");
  final byte[] row1 = Bytes.toBytes("row1");
  final byte[] row2 = Bytes.toBytes("row2");
  final byte[] value = Bytes.toBytes("abcd");
  Table table = TEST_UTIL.createTable(TableName.valueOf("testPut"),
      new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY });
  Put put = new Put(row1);
  put.addColumn(CONTENTS_FAMILY, null, value);
  table.put(put);
  put = new Put(row2);
  put.addColumn(CONTENTS_FAMILY, null, value);
  // Inspect the client-side representation before sending: one cell under
  // CONTENTS_FAMILY; a null qualifier is stored as an empty byte array.
  assertEquals(put.size(), 1);
  assertEquals(put.getFamilyCellMap().get(CONTENTS_FAMILY).size(), 1);
  KeyValue kv = (KeyValue) put.getFamilyCellMap().get(CONTENTS_FAMILY).get(0);
  assertTrue(Bytes.equals(CellUtil.cloneFamily(kv), CONTENTS_FAMILY));
  assertTrue(Bytes.equals(CellUtil.cloneQualifier(kv), new byte[0]));
  assertTrue(Bytes.equals(CellUtil.cloneValue(kv), value));
  table.put(put);
  Scan scan = new Scan();
  scan.addColumn(CONTENTS_FAMILY, null);
  // Close the scanner when done (the original leaked it).
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      for (Cell key : r.rawCells()) {
        System.out.println(Bytes.toString(r.getRow()) + ": " + key.toString());
      }
    }
  }
}

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Exercises "closest row before" semantics via a reverse scan: four rows
 * (row111..row444) are written to the catalog family and flushed, then for each
 * probe key (exact rows and keys sorting just before each row) the reverse-scan
 * helper must return the nearest preceding row, or null when the probe sorts
 * before the first row. The exact write/flush/probe order below is what the
 * test asserts against — do not reorder.
 * @throws IOException on any client failure
 * @throws InterruptedException if interrupted while waiting
 */
@Test public void testGetClosestRowBefore() throws IOException, InterruptedException { final TableName tableName=TableName.valueOf("testGetClosestRowBefore"); final byte[] firstRow=Bytes.toBytes("row111"); final byte[] secondRow=Bytes.toBytes("row222"); final byte[] thirdRow=Bytes.toBytes("row333"); final byte[] forthRow=Bytes.toBytes("row444"); final byte[] beforeFirstRow=Bytes.toBytes("row"); final byte[] beforeSecondRow=Bytes.toBytes("row22"); final byte[] beforeThirdRow=Bytes.toBytes("row33"); final byte[] beforeForthRow=Bytes.toBytes("row44"); try (Table t=TEST_UTIL.createTable(tableName,new byte[][]{HConstants.CATALOG_FAMILY,Bytes.toBytes("info2")},1,1024);RegionLocator locator=TEST_UTIL.getConnection().getRegionLocator(tableName)){ if (t instanceof HTableInterface) { HTableInterface table=(HTableInterface)t; String regionName=locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region=TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); Put put1=new Put(firstRow); Put put2=new Put(secondRow); Put put3=new Put(thirdRow); Put put4=new Put(forthRow); byte[] one=new byte[]{1}; byte[] two=new byte[]{2}; byte[] three=new byte[]{3}; byte[] four=new byte[]{4}; put1.addColumn(HConstants.CATALOG_FAMILY,null,one); put2.addColumn(HConstants.CATALOG_FAMILY,null,two); put3.addColumn(HConstants.CATALOG_FAMILY,null,three); put4.addColumn(HConstants.CATALOG_FAMILY,null,four); table.put(put1); table.put(put2); table.put(put3); table.put(put4); region.flush(true); Result result; result=getReverseScanResult(table,beforeFirstRow,HConstants.CATALOG_FAMILY); assertNull(result); result=getReverseScanResult(table,firstRow,HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY,null)); assertTrue(Bytes.equals(result.getRow(),firstRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY,null),one)); result=getReverseScanResult(table,beforeSecondRow,HConstants.CATALOG_FAMILY); 
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY,null)); assertTrue(Bytes.equals(result.getRow(),firstRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY,null),one)); result=getReverseScanResult(table,secondRow,HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY,null)); assertTrue(Bytes.equals(result.getRow(),secondRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY,null),two)); result=getReverseScanResult(table,beforeThirdRow,HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY,null)); assertTrue(Bytes.equals(result.getRow(),secondRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY,null),two)); result=getReverseScanResult(table,thirdRow,HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY,null)); assertTrue(Bytes.equals(result.getRow(),thirdRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY,null),three)); result=getReverseScanResult(table,beforeForthRow,HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY,null)); assertTrue(Bytes.equals(result.getRow(),thirdRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY,null),three)); result=getReverseScanResult(table,forthRow,HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY,null)); assertTrue(Bytes.equals(result.getRow(),forthRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY,null),four)); result=getReverseScanResult(table,Bytes.add(forthRow,one),HConstants.CATALOG_FAMILY); assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY,null)); assertTrue(Bytes.equals(result.getRow(),forthRow)); assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY,null),four)); } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end Delete semantics across several phases, each verified by both Get
 * and Scan with MAX_VALUE versions:
 *  1) addFamily(ts) removes versions at/older than the given timestamp only;
 *  2) addColumn(qualifier) removes only the latest version of that column;
 *  3) addColumn(null) / addColumns(null) target the empty-qualifier column;
 *  4) family-wide, column-latest, and all-versions deletes applied to three
 *     separate rows interact as asserted (4 keys -> 2 -> 2 -> 1 survivors);
 *  5) deleting a family that was never written is a no-op;
 *  6) a batch of family deletes over ten rows empties each of them.
 * The write/delete/assert ordering is the contract under test — do not reorder.
 * @throws Exception on any client failure
 */
@Test public void testDeletes() throws Exception { TableName TABLE=TableName.valueOf("testDeletes"); byte[][] ROWS=makeNAscii(ROW,6); byte[][] FAMILIES=makeNAscii(FAMILY,3); byte[][] VALUES=makeN(VALUE,5); long[] ts={1000,2000,3000,4000,5000}; Table ht=TEST_UTIL.createTable(TABLE,FAMILIES,3); Put put=new Put(ROW); put.addColumn(FAMILIES[0],QUALIFIER,ts[0],VALUES[0]); put.addColumn(FAMILIES[0],QUALIFIER,ts[1],VALUES[1]); ht.put(put); Delete delete=new Delete(ROW); delete.addFamily(FAMILIES[0],ts[0]); ht.delete(delete); Get get=new Get(ROW); get.addFamily(FAMILIES[0]); get.setMaxVersions(Integer.MAX_VALUE); Result result=ht.get(get); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{ts[1]},new byte[][]{VALUES[1]},0,0); Scan scan=new Scan(ROW); scan.addFamily(FAMILIES[0]); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{ts[1]},new byte[][]{VALUES[1]},0,0); put=new Put(ROW); put.addColumn(FAMILIES[0],QUALIFIER,ts[4],VALUES[4]); put.addColumn(FAMILIES[0],QUALIFIER,ts[2],VALUES[2]); put.addColumn(FAMILIES[0],QUALIFIER,ts[3],VALUES[3]); put.addColumn(FAMILIES[0],null,ts[4],VALUES[4]); put.addColumn(FAMILIES[0],null,ts[2],VALUES[2]); put.addColumn(FAMILIES[0],null,ts[3],VALUES[3]); ht.put(put); delete=new Delete(ROW); delete.addColumn(FAMILIES[0],QUALIFIER); ht.delete(delete); get=new Get(ROW); get.addColumn(FAMILIES[0],QUALIFIER); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{ts[1],ts[2],ts[3]},new byte[][]{VALUES[1],VALUES[2],VALUES[3]},0,2); scan=new Scan(ROW); scan.addColumn(FAMILIES[0],QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{ts[1],ts[2],ts[3]},new byte[][]{VALUES[1],VALUES[2],VALUES[3]},0,2); delete=new Delete(ROW); delete.addColumn(FAMILIES[0],null); ht.delete(delete); delete=new Delete(ROW); 
delete.addColumns(FAMILIES[0],null); ht.delete(delete); put=new Put(ROW); put.addColumn(FAMILIES[0],QUALIFIER,ts[0],VALUES[0]); put.addColumn(FAMILIES[0],QUALIFIER,ts[4],VALUES[4]); ht.put(put); get=new Get(ROW); get.addFamily(FAMILIES[0]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{ts[1],ts[2],ts[3]},new byte[][]{VALUES[1],VALUES[2],VALUES[3]},0,2); scan=new Scan(ROW); scan.addFamily(FAMILIES[0]); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{ts[1],ts[2],ts[3]},new byte[][]{VALUES[1],VALUES[2],VALUES[3]},0,2); put=new Put(ROWS[0]); put.addColumn(FAMILIES[1],QUALIFIER,ts[0],VALUES[0]); put.addColumn(FAMILIES[1],QUALIFIER,ts[1],VALUES[1]); put.addColumn(FAMILIES[2],QUALIFIER,ts[2],VALUES[2]); put.addColumn(FAMILIES[2],QUALIFIER,ts[3],VALUES[3]); ht.put(put); put=new Put(ROWS[1]); put.addColumn(FAMILIES[1],QUALIFIER,ts[0],VALUES[0]); put.addColumn(FAMILIES[1],QUALIFIER,ts[1],VALUES[1]); put.addColumn(FAMILIES[2],QUALIFIER,ts[2],VALUES[2]); put.addColumn(FAMILIES[2],QUALIFIER,ts[3],VALUES[3]); ht.put(put); put=new Put(ROWS[2]); put.addColumn(FAMILIES[1],QUALIFIER,ts[0],VALUES[0]); put.addColumn(FAMILIES[1],QUALIFIER,ts[1],VALUES[1]); put.addColumn(FAMILIES[2],QUALIFIER,ts[2],VALUES[2]); put.addColumn(FAMILIES[2],QUALIFIER,ts[3],VALUES[3]); ht.put(put); get=new Get(ROWS[2]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertTrue("Expected 4 key but received " + result.size() + ": "+ result,result.size() == 4); delete=new Delete(ROWS[0]); delete.addFamily(FAMILIES[2]); ht.delete(delete); delete=new Delete(ROWS[1]); delete.addColumns(FAMILIES[1],QUALIFIER); ht.delete(delete); delete=new Delete(ROWS[2]); delete.addColumn(FAMILIES[1],QUALIFIER); delete.addColumn(FAMILIES[1],QUALIFIER); delete.addColumn(FAMILIES[2],QUALIFIER); 
ht.delete(delete); get=new Get(ROWS[0]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertTrue("Expected 2 keys but received " + result.size(),result.size() == 2); assertNResult(result,ROWS[0],FAMILIES[1],QUALIFIER,new long[]{ts[0],ts[1]},new byte[][]{VALUES[0],VALUES[1]},0,1); scan=new Scan(ROWS[0]); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertTrue("Expected 2 keys but received " + result.size(),result.size() == 2); assertNResult(result,ROWS[0],FAMILIES[1],QUALIFIER,new long[]{ts[0],ts[1]},new byte[][]{VALUES[0],VALUES[1]},0,1); get=new Get(ROWS[1]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertTrue("Expected 2 keys but received " + result.size(),result.size() == 2); scan=new Scan(ROWS[1]); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertTrue("Expected 2 keys but received " + result.size(),result.size() == 2); get=new Get(ROWS[2]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertEquals(1,result.size()); assertNResult(result,ROWS[2],FAMILIES[2],QUALIFIER,new long[]{ts[2]},new byte[][]{VALUES[2]},0,0); scan=new Scan(ROWS[2]); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertEquals(1,result.size()); assertNResult(result,ROWS[2],FAMILIES[2],QUALIFIER,new long[]{ts[2]},new byte[][]{VALUES[2]},0,0); delete=new Delete(ROWS[3]); delete.addFamily(FAMILIES[1]); ht.delete(delete); put=new Put(ROWS[3]); put.addColumn(FAMILIES[2],QUALIFIER,VALUES[0]); ht.put(put); put=new Put(ROWS[4]); put.addColumn(FAMILIES[1],QUALIFIER,VALUES[1]); put.addColumn(FAMILIES[2],QUALIFIER,VALUES[2]); 
ht.put(put); get=new Get(ROWS[3]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertTrue("Expected 1 key but received " + result.size(),result.size() == 1); get=new Get(ROWS[4]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); get.setMaxVersions(Integer.MAX_VALUE); result=ht.get(get); assertTrue("Expected 2 keys but received " + result.size(),result.size() == 2); scan=new Scan(ROWS[3]); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); scan.setMaxVersions(Integer.MAX_VALUE); ResultScanner scanner=ht.getScanner(scan); result=scanner.next(); assertTrue("Expected 1 key but received " + result.size(),result.size() == 1); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]),ROWS[3])); assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[0]),VALUES[0])); result=scanner.next(); assertTrue("Expected 2 keys but received " + result.size(),result.size() == 2); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]),ROWS[4])); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[1]),ROWS[4])); assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[0]),VALUES[1])); assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[1]),VALUES[2])); scanner.close(); for (int i=0; i < 10; i++) { byte[] bytes=Bytes.toBytes(i); put=new Put(bytes); put.setDurability(Durability.SKIP_WAL); put.addColumn(FAMILIES[0],QUALIFIER,bytes); ht.put(put); } for (int i=0; i < 10; i++) { byte[] bytes=Bytes.toBytes(i); get=new Get(bytes); get.addFamily(FAMILIES[0]); result=ht.get(get); assertTrue(result.size() == 1); } ArrayList deletes=new ArrayList(); for (int i=0; i < 10; i++) { byte[] bytes=Bytes.toBytes(i); delete=new Delete(bytes); delete.addFamily(FAMILIES[0]); deletes.add(delete); } ht.delete(deletes); for (int i=0; i < 10; i++) { byte[] bytes=Bytes.toBytes(i); get=new Get(bytes); get.addFamily(FAMILIES[0]); result=ht.get(get); assertTrue(result.size() == 0); } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier 
/**
 * Creates three tables and verifies Admin.listTables() reports each of them.
 * Other tests may have left tables behind, so the listing is allowed to contain
 * extras; the assertion is containment, not equality.
 * @throws IOException on any admin failure
 * @throws InterruptedException if table creation is interrupted
 */
@Test
public void testListTables() throws IOException, InterruptedException {
  TableName t1 = TableName.valueOf("testListTables1");
  TableName t2 = TableName.valueOf("testListTables2");
  TableName t3 = TableName.valueOf("testListTables3");
  TableName[] tables = new TableName[] { t1, t2, t3 };
  for (int i = 0; i < tables.length; i++) {
    TEST_UTIL.createTable(tables[i], FAMILY);
  }
  Admin admin = TEST_UTIL.getHBaseAdmin();
  HTableDescriptor[] ts = admin.listTables();
  // FIX: parameterize the raw HashSet (it holds HTableDescriptor entries).
  HashSet<HTableDescriptor> result = new HashSet<>(ts.length);
  Collections.addAll(result, ts);
  int size = result.size();
  assertTrue(size >= tables.length);
  // Every table we created must appear somewhere in the listing.
  for (int i = 0; i < tables.length && i < size; i++) {
    boolean found = false;
    for (int j = 0; j < ts.length; j++) {
      if (ts[j].getTableName().equals(tables[i])) {
        found = true;
        break;
      }
    }
    assertTrue("Not found: " + tables[i], found);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Same Delete-semantics phases as testDeletes, but every verification is done
 * through a reversed Scan (setReversed(true)) instead of Get: timestamped
 * family deletes, latest-version column deletes, null-qualifier deletes, and
 * per-row family/column deletes across three rows, finishing with a reversed
 * scan that must return ROWS[4] (2 cells) before ROWS[3] (1 cell). The exact
 * ordering of writes, deletes, and assertions is the behavior under test.
 * @throws Exception on any client failure
 */
@Test public void testDeletesWithReverseScan() throws Exception { TableName TABLE=TableName.valueOf("testDeletesWithReverseScan"); byte[][] ROWS=makeNAscii(ROW,6); byte[][] FAMILIES=makeNAscii(FAMILY,3); byte[][] VALUES=makeN(VALUE,5); long[] ts={1000,2000,3000,4000,5000}; Table ht=TEST_UTIL.createTable(TABLE,FAMILIES,3); Put put=new Put(ROW); put.addColumn(FAMILIES[0],QUALIFIER,ts[0],VALUES[0]); put.addColumn(FAMILIES[0],QUALIFIER,ts[1],VALUES[1]); ht.put(put); Delete delete=new Delete(ROW); delete.addFamily(FAMILIES[0],ts[0]); ht.delete(delete); Scan scan=new Scan(ROW); scan.setReversed(true); scan.addFamily(FAMILIES[0]); scan.setMaxVersions(Integer.MAX_VALUE); Result result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{ts[1]},new byte[][]{VALUES[1]},0,0); put=new Put(ROW); put.addColumn(FAMILIES[0],QUALIFIER,ts[4],VALUES[4]); put.addColumn(FAMILIES[0],QUALIFIER,ts[2],VALUES[2]); put.addColumn(FAMILIES[0],QUALIFIER,ts[3],VALUES[3]); put.addColumn(FAMILIES[0],null,ts[4],VALUES[4]); put.addColumn(FAMILIES[0],null,ts[2],VALUES[2]); put.addColumn(FAMILIES[0],null,ts[3],VALUES[3]); ht.put(put); delete=new Delete(ROW); delete.addColumn(FAMILIES[0],QUALIFIER); ht.delete(delete); scan=new Scan(ROW); scan.setReversed(true); scan.addColumn(FAMILIES[0],QUALIFIER); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{ts[1],ts[2],ts[3]},new byte[][]{VALUES[1],VALUES[2],VALUES[3]},0,2); delete=new Delete(ROW); delete.addColumn(FAMILIES[0],null); ht.delete(delete); delete=new Delete(ROW); delete.addColumns(FAMILIES[0],null); ht.delete(delete); put=new Put(ROW); put.addColumn(FAMILIES[0],QUALIFIER,ts[0],VALUES[0]); put.addColumn(FAMILIES[0],QUALIFIER,ts[4],VALUES[4]); ht.put(put); scan=new Scan(ROW); scan.setReversed(true); scan.addFamily(FAMILIES[0]); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); 
assertNResult(result,ROW,FAMILIES[0],QUALIFIER,new long[]{ts[1],ts[2],ts[3]},new byte[][]{VALUES[1],VALUES[2],VALUES[3]},0,2); put=new Put(ROWS[0]); put.addColumn(FAMILIES[1],QUALIFIER,ts[0],VALUES[0]); put.addColumn(FAMILIES[1],QUALIFIER,ts[1],VALUES[1]); put.addColumn(FAMILIES[2],QUALIFIER,ts[2],VALUES[2]); put.addColumn(FAMILIES[2],QUALIFIER,ts[3],VALUES[3]); ht.put(put); put=new Put(ROWS[1]); put.addColumn(FAMILIES[1],QUALIFIER,ts[0],VALUES[0]); put.addColumn(FAMILIES[1],QUALIFIER,ts[1],VALUES[1]); put.addColumn(FAMILIES[2],QUALIFIER,ts[2],VALUES[2]); put.addColumn(FAMILIES[2],QUALIFIER,ts[3],VALUES[3]); ht.put(put); put=new Put(ROWS[2]); put.addColumn(FAMILIES[1],QUALIFIER,ts[0],VALUES[0]); put.addColumn(FAMILIES[1],QUALIFIER,ts[1],VALUES[1]); put.addColumn(FAMILIES[2],QUALIFIER,ts[2],VALUES[2]); put.addColumn(FAMILIES[2],QUALIFIER,ts[3],VALUES[3]); ht.put(put); delete=new Delete(ROWS[0]); delete.addFamily(FAMILIES[2]); ht.delete(delete); delete=new Delete(ROWS[1]); delete.addColumns(FAMILIES[1],QUALIFIER); ht.delete(delete); delete=new Delete(ROWS[2]); delete.addColumn(FAMILIES[1],QUALIFIER); delete.addColumn(FAMILIES[1],QUALIFIER); delete.addColumn(FAMILIES[2],QUALIFIER); ht.delete(delete); scan=new Scan(ROWS[0]); scan.setReversed(true); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertTrue("Expected 2 keys but received " + result.size(),result.size() == 2); assertNResult(result,ROWS[0],FAMILIES[1],QUALIFIER,new long[]{ts[0],ts[1]},new byte[][]{VALUES[0],VALUES[1]},0,1); scan=new Scan(ROWS[1]); scan.setReversed(true); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertTrue("Expected 2 keys but received " + result.size(),result.size() == 2); scan=new Scan(ROWS[2]); scan.setReversed(true); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); 
scan.setMaxVersions(Integer.MAX_VALUE); result=getSingleScanResult(ht,scan); assertEquals(1,result.size()); assertNResult(result,ROWS[2],FAMILIES[2],QUALIFIER,new long[]{ts[2]},new byte[][]{VALUES[2]},0,0); delete=new Delete(ROWS[3]); delete.addFamily(FAMILIES[1]); ht.delete(delete); put=new Put(ROWS[3]); put.addColumn(FAMILIES[2],QUALIFIER,VALUES[0]); ht.put(put); put=new Put(ROWS[4]); put.addColumn(FAMILIES[1],QUALIFIER,VALUES[1]); put.addColumn(FAMILIES[2],QUALIFIER,VALUES[2]); ht.put(put); scan=new Scan(ROWS[4]); scan.setReversed(true); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); scan.setMaxVersions(Integer.MAX_VALUE); ResultScanner scanner=ht.getScanner(scan); result=scanner.next(); assertTrue("Expected 2 keys but received " + result.size(),result.size() == 2); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]),ROWS[4])); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[1]),ROWS[4])); assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[0]),VALUES[1])); assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[1]),VALUES[2])); result=scanner.next(); assertTrue("Expected 1 key but received " + result.size(),result.size() == 1); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]),ROWS[3])); assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[0]),VALUES[0])); scanner.close(); ht.close(); }

InternalCallVerifier BooleanVerifier 
/**
 * Writes two interleaved groups of row keys and verifies that a reversed scan
 * bounded to the first group ("0-b11111-...") returns that group's
 * highest-sorting row first.
 * @throws Exception on any client failure
 */
@Test
public void testSuperSimpleWithReverseScan() throws Exception {
  TableName TABLE = TableName.valueOf("testSuperSimpleWithReverseScan");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  // Same column/value for every row; only the row keys vary.
  String[] rowKeys = {
      "0-b11111-0000000000000000000",
      "0-b11111-0000000000000000002",
      "0-b11111-0000000000000000004",
      "0-b11111-0000000000000000006",
      "0-b11111-0000000000000000008",
      "0-b22222-0000000000000000001",
      "0-b22222-0000000000000000003",
      "0-b22222-0000000000000000005",
      "0-b22222-0000000000000000007",
      "0-b22222-0000000000000000009" };
  for (String rowKey : rowKeys) {
    Put p = new Put(Bytes.toBytes(rowKey));
    p.addColumn(FAMILY, QUALIFIER, VALUE);
    ht.put(p);
  }
  // Reversed scan: start key sorts above every "0-b11111" row, stop key is the
  // group's lowest row (exclusive), so the first result is the "...008" row.
  Scan scan = new Scan(Bytes.toBytes("0-b11111-9223372036854775807"),
      Bytes.toBytes("0-b11111-0000000000000000000"));
  scan.setReversed(true);
  ResultScanner scanner = ht.getScanner(scan);
  Result result = scanner.next();
  assertTrue(Bytes.equals(result.getRow(), Bytes.toBytes("0-b11111-0000000000000000008")));
  scanner.close();
  ht.close();
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises ScanMetrics collection on a multi-region table: metrics are absent
 * unless explicitly enabled; a scan with caching=1 must touch every region
 * (countOfRegions); countOfBytesInResults must match the estimated serialized
 * size of the returned cells (for both normal and small scans); and metrics
 * must still be published when the scanner is closed before exhaustion. The
 * sequence of scans and their caching settings is the behavior under test.
 * @throws Exception on any client failure
 */
@Test @SuppressWarnings("unused") public void testScanMetrics() throws Exception { TableName TABLENAME=TableName.valueOf("testScanMetrics"); Table ht=TEST_UTIL.createMultiRegionTable(TABLENAME,FAMILY); int numOfRegions=-1; try (RegionLocator r=TEST_UTIL.getConnection().getRegionLocator(TABLENAME)){ numOfRegions=r.getStartKeys().length; } Put put1=new Put(Bytes.toBytes("zzz1")); put1.addColumn(FAMILY,QUALIFIER,VALUE); Put put2=new Put(Bytes.toBytes("zzz2")); put2.addColumn(FAMILY,QUALIFIER,VALUE); Put put3=new Put(Bytes.toBytes("zzz3")); put3.addColumn(FAMILY,QUALIFIER,VALUE); ht.put(Arrays.asList(put1,put2,put3)); Scan scan1=new Scan(); int numRecords=0; ResultScanner scanner=ht.getScanner(scan1); for ( Result result : scanner) { numRecords++; } scanner.close(); LOG.info("test data has " + numRecords + " records."); assertEquals(null,scan1.getScanMetrics()); Scan scan2=new Scan(); scan2.setScanMetricsEnabled(true); scan2.setCaching(numRecords + 1); scanner=ht.getScanner(scan2); for ( Result result : scanner.next(numRecords - 1)) { } scanner.close(); assertNotNull(scan2.getScanMetrics()); scan2=new Scan(); scan2.setScanMetricsEnabled(true); scan2.setCaching(1); scanner=ht.getScanner(scan2); for ( Result result : scanner.next(numRecords - 1)) { } scanner.close(); ScanMetrics scanMetrics=scan2.getScanMetrics(); assertEquals("Did not access all the regions in the table",numOfRegions,scanMetrics.countOfRegions.get()); scan2=new Scan(); scan2.setScanMetricsEnabled(true); scan2.setCaching(1); scanner=ht.getScanner(scan2); int numBytes=0; for ( Result result : scanner.next(1)) { for ( Cell cell : result.listCells()) { numBytes+=CellUtil.estimatedSerializedSizeOf(cell); } } scanner.close(); scanMetrics=scan2.getScanMetrics(); assertEquals("Did not count the result bytes",numBytes,scanMetrics.countOfBytesInResults.get()); scan2=new Scan(); scan2.setScanMetricsEnabled(true); scan2.setCaching(1); scan2.setSmall(true); 
scanner=ht.getScanner(scan2); numBytes=0; for ( Result result : scanner.next(1)) { for ( Cell cell : result.listCells()) { numBytes+=CellUtil.estimatedSerializedSizeOf(cell); } } scanner.close(); scanMetrics=scan2.getScanMetrics(); assertEquals("Did not count the result bytes",numBytes,scanMetrics.countOfBytesInResults.get()); Scan scanWithClose=new Scan(); scanWithClose.setCaching(numRecords); scanWithClose.setScanMetricsEnabled(true); ResultScanner scannerWithClose=ht.getScanner(scanWithClose); for ( Result result : scannerWithClose.next(numRecords + 1)) { } scannerWithClose.close(); ScanMetrics scanMetricsWithClose=getScanMetrics(scanWithClose); assertEquals("Did not access all the regions in the table",numOfRegions,scanMetricsWithClose.countOfRegions.get()); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises checkAndPut: checking a value on a missing cell fails, checking
 * for absence (null) succeeds exactly once, checking the current value swaps
 * it, and mixing the checked row with a Put for a different row is rejected.
 * @throws IOException on any client failure
 */
@Test
public void testCheckAndPut() throws IOException {
  final byte[] anotherrow = Bytes.toBytes("anotherrow");
  final byte[] value2 = Bytes.toBytes("abcd");
  Table table = TEST_UTIL.createTable(TableName.valueOf("testCheckAndPut"), FAMILY);
  Put put1 = new Put(ROW);
  put1.addColumn(FAMILY, QUALIFIER, VALUE);
  // Cell does not exist yet, so a concrete-value check fails...
  boolean ok = table.checkAndPut(ROW, FAMILY, QUALIFIER, VALUE, put1);
  assertEquals(ok, false);
  // ...but a null (absence) check succeeds and writes the cell.
  ok = table.checkAndPut(ROW, FAMILY, QUALIFIER, null, put1);
  assertEquals(ok, true);
  // Now the cell exists, so the absence check must fail.
  ok = table.checkAndPut(ROW, FAMILY, QUALIFIER, null, put1);
  assertEquals(ok, false);
  Put put2 = new Put(ROW);
  put2.addColumn(FAMILY, QUALIFIER, value2);
  // Matching the current value succeeds and swaps in value2.
  ok = table.checkAndPut(ROW, FAMILY, QUALIFIER, VALUE, put2);
  assertEquals(ok, true);
  Put put3 = new Put(anotherrow);
  put3.addColumn(FAMILY, QUALIFIER, VALUE);
  try {
    // The checked row and the Put's row must match; this mix is rejected.
    ok = table.checkAndPut(ROW, FAMILY, QUALIFIER, value2, put3);
    fail("trying to check and modify different rows should have failed.");
  } catch (Exception ignored) {
    // expected: cross-row check-and-put is not allowed
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Verifies Append semantics: with setReturnResults(false) the returned Result
 * is null/empty; a later append concatenates onto the stored values, creates a
 * brand-new column, and stamps all cells of one Append with the same timestamp.
 * @throws Exception on any client failure
 */
@Test
public void testAppend() throws Exception {
  LOG.info("Starting testAppend");
  final TableName TABLENAME = TableName.valueOf("testAppend");
  Table t = TEST_UTIL.createTable(TABLENAME, FAMILY);
  byte[] v1 = Bytes.toBytes("42");
  byte[] v2 = Bytes.toBytes("23");
  byte[][] QUALIFIERS = new byte[][] {
      Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("c") };
  // First append suppresses the returned results entirely.
  Append first = new Append(ROW);
  first.add(FAMILY, QUALIFIERS[0], v1);
  first.add(FAMILY, QUALIFIERS[1], v2);
  first.setReturnResults(false);
  assertNullResult(t.append(first));
  // Second append extends both existing columns and introduces a third.
  Append second = new Append(ROW);
  second.add(FAMILY, QUALIFIERS[0], v2);
  second.add(FAMILY, QUALIFIERS[1], v1);
  second.add(FAMILY, QUALIFIERS[2], v2);
  Result r = t.append(second);
  assertEquals(0, Bytes.compareTo(Bytes.add(v1, v2), r.getValue(FAMILY, QUALIFIERS[0])));
  assertEquals(0, Bytes.compareTo(Bytes.add(v2, v1), r.getValue(FAMILY, QUALIFIERS[1])));
  assertEquals(0, Bytes.compareTo(v2, r.getValue(FAMILY, QUALIFIERS[2])));
  // All cells written by a single Append share one timestamp.
  assertEquals(r.getColumnLatestCell(FAMILY, QUALIFIERS[0]).getTimestamp(),
      r.getColumnLatestCell(FAMILY, QUALIFIERS[2]).getTimestamp());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Basic client side validation of HBASE-10118: a delete marker with a
 * timestamp in the future must be purged by a major compaction, so that a
 * subsequent put at that same timestamp becomes visible again.
 * @throws Exception on any client or admin failure
 */
@Test
public void testPurgeFutureDeletes() throws Exception {
  final TableName TABLENAME = TableName.valueOf("testPurgeFutureDeletes");
  final byte[] ROW = Bytes.toBytes("row");
  final byte[] FAMILY = Bytes.toBytes("family");
  final byte[] COLUMN = Bytes.toBytes("column");
  final byte[] VALUE = Bytes.toBytes("value");
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);
  // Write a cell with a timestamp far in the future.
  long ts = System.currentTimeMillis() * 2;
  Put put = new Put(ROW, ts);
  put.addColumn(FAMILY, COLUMN, VALUE);
  table.put(put);
  Get get = new Get(ROW);
  Result result = table.get(get);
  assertArrayEquals(VALUE, result.getValue(FAMILY, COLUMN));
  // Delete exactly that future cell; the value must disappear.
  Delete del = new Delete(ROW);
  del.addColumn(FAMILY, COLUMN, ts);
  table.delete(del);
  get = new Get(ROW);
  result = table.get(get);
  assertNull(result.getValue(FAMILY, COLUMN));
  // Flush and major-compact so the delete marker gets purged, then wait for
  // the compaction to finish.
  TEST_UTIL.getHBaseAdmin().flush(TABLENAME);
  TEST_UTIL.getHBaseAdmin().majorCompact(TABLENAME);
  // FIX: parameterize the raw Waiter.Predicate with its declared exception type.
  TEST_UTIL.waitFor(6000, new Waiter.Predicate<IOException>() {
    @Override
    public boolean evaluate() throws IOException {
      return TEST_UTIL.getHBaseAdmin().getCompactionState(TABLENAME)
          == AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
    }
  });
  // With the marker purged, a put at the same timestamp is visible again.
  put = new Put(ROW, ts);
  put.addColumn(FAMILY, COLUMN, VALUE);
  table.put(put);
  get = new Get(ROW);
  result = table.get(get);
  assertArrayEquals(VALUE, result.getValue(FAMILY, COLUMN));
  table.close();
}

InternalCallVerifier BooleanVerifier 
/**
 * A Put against a nonexistent column family must surface a
 * NoSuchColumnFamilyException as the cause of the batch exception.
 * @throws IOException on unexpected client failure
 */
@Test
public void testPutNoCF() throws IOException {
  final byte[] BAD_FAM = Bytes.toBytes("BAD_CF");
  final byte[] VAL = Bytes.toBytes(100);
  Table table = TEST_UTIL.createTable(TableName.valueOf("testPutNoCF"), FAMILY);
  boolean caughtNSCFE = false;
  try {
    Put badPut = new Put(ROW);
    badPut.addColumn(BAD_FAM, QUALIFIER, VAL);
    table.put(badPut);
  } catch (RetriesExhaustedWithDetailsException e) {
    // The batch exception wraps the real failure; inspect the first cause.
    caughtNSCFE = e.getCause(0) instanceof NoSuchColumnFamilyException;
  }
  assertTrue("Should throw NoSuchColumnFamilyException", caughtNSCFE);
}

InternalCallVerifier BooleanVerifier 
/**
 * For HBASE-2156: a Scan's family map must reflect exactly the add calls made
 * on it — addColumn after addFamily narrows to that qualifier, while a fresh
 * Scan with only addFamily maps the family to null (meaning "all columns").
 * @throws Exception never; kept for test-signature consistency
 */
@Test
public void testScanVariableReuse() throws Exception {
  // addFamily then addColumn: the family tracks the single explicit qualifier.
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  scan.addColumn(FAMILY, ROW);
  assertTrue(scan.getFamilyMap().get(FAMILY).size() == 1);
  // A fresh scan with only addFamily: the family is present but maps to null.
  scan = new Scan();
  scan.addFamily(FAMILY);
  assertTrue(scan.getFamilyMap().get(FAMILY) == null);
  assertTrue(scan.getFamilyMap().containsKey(FAMILY));
}

InternalCallVerifier EqualityVerifier 
/**
 * Exhaustively exercises checkAndPut with every CompareOp against a cell whose
 * value is driven through value2 -> value3 -> value2 -> value3 as checks
 * succeed. For each stored value the test probes GREATER / GREATER_OR_EQUAL /
 * LESS / LESS_OR_EQUAL / EQUAL / NOT_EQUAL with comparands that sort below,
 * above, and equal to it, asserting exactly which comparisons pass. Note the
 * comparison is comparand-vs-stored-value, so e.g. LESS succeeds when the
 * comparand sorts below the stored value. The order matters: each successful
 * check mutates the cell that the following checks run against.
 * @throws IOException on any client failure
 */
@Test public void testCheckAndPutWithCompareOp() throws IOException { final byte[] value1=Bytes.toBytes("aaaa"); final byte[] value2=Bytes.toBytes("bbbb"); final byte[] value3=Bytes.toBytes("cccc"); final byte[] value4=Bytes.toBytes("dddd"); Table table=TEST_UTIL.createTable(TableName.valueOf("testCheckAndPutWithCompareOp"),FAMILY); Put put2=new Put(ROW); put2.addColumn(FAMILY,QUALIFIER,value2); Put put3=new Put(ROW); put3.addColumn(FAMILY,QUALIFIER,value3); boolean ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,null,put2); assertEquals(ok,true); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.GREATER,value1,put2); assertEquals(ok,false); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.EQUAL,value1,put2); assertEquals(ok,false); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.GREATER_OR_EQUAL,value1,put2); assertEquals(ok,false); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.LESS,value1,put2); assertEquals(ok,true); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.LESS_OR_EQUAL,value1,put2); assertEquals(ok,true); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.NOT_EQUAL,value1,put3); assertEquals(ok,true); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.LESS,value4,put3); assertEquals(ok,false); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.LESS_OR_EQUAL,value4,put3); assertEquals(ok,false); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.EQUAL,value4,put3); assertEquals(ok,false); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.GREATER,value4,put3); assertEquals(ok,true); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.GREATER_OR_EQUAL,value4,put3); assertEquals(ok,true); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.NOT_EQUAL,value4,put2); assertEquals(ok,true); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.GREATER,value2,put2); assertEquals(ok,false); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.NOT_EQUAL,value2,put2); assertEquals(ok,false); 
ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.LESS,value2,put2); assertEquals(ok,false); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.GREATER_OR_EQUAL,value2,put2); assertEquals(ok,true); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.LESS_OR_EQUAL,value2,put2); assertEquals(ok,true); ok=table.checkAndPut(ROW,FAMILY,QUALIFIER,CompareOp.EQUAL,value2,put3); assertEquals(ok,true); }

InternalCallVerifier BooleanVerifier 
/**
 * Scanning a qualifier that no row was ever written with must yield no results.
 * @throws Exception on any client failure
 */
@Test
public void testSuperSimple() throws Exception {
  TableName TABLE = TableName.valueOf("testSuperSimple");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  Put put = new Put(ROW);
  put.addColumn(FAMILY, QUALIFIER, VALUE);
  ht.put(put);
  // Deliberately ask for a qualifier (the table-name bytes) that was never written.
  Scan scan = new Scan();
  scan.addColumn(FAMILY, TABLE.toBytes());
  ResultScanner scanner = ht.getScanner(scan);
  Result result = scanner.next();
  assertTrue("Expected null result", result == null);
  scanner.close();
}

Class: org.apache.hadoop.hbase.client.TestFromClientSide3

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * existsAll must report presence for both written rows, and a subsequent
 * batch get over the same Get list must return non-empty Results holding the
 * stored values.
 * @throws Exception on any client failure
 */
@Test
public void testHTableExistsAllBeforeGet() throws Exception {
  final byte[] ROW2 = Bytes.add(ROW, Bytes.toBytes("2"));
  Table table = TEST_UTIL.createTable(TableName.valueOf("testHTableExistsAllBeforeGet"),
      new byte[][] { FAMILY });
  try {
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
    put = new Put(ROW2);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
    Get get = new Get(ROW);
    Get get2 = new Get(ROW2);
    // FIX: parameterize the raw ArrayList — existsAll/get take a List<Get>.
    ArrayList<Get> getList = new ArrayList<>(2);
    getList.add(get);
    getList.add(get2);
    // Existence check without fetching the values.
    boolean[] exists = table.existsAll(getList);
    assertEquals(true, exists[0]);
    assertEquals(true, exists[1]);
    // Batch get returns the actual stored values.
    Result[] result = table.get(getList);
    assertEquals(false, result[0].isEmpty());
    assertTrue(Bytes.equals(VALUE, result[0].getValue(FAMILY, QUALIFIER)));
    assertEquals(false, result[1].isEmpty());
    assertTrue(Bytes.equals(VALUE, result[1].getValue(FAMILY, QUALIFIER)));
  } finally {
    table.close();
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Get behavior for edge-case row keys: an empty row key must be rejected with
 * IllegalArgumentException, a missing row yields an empty (non-null) Result,
 * and an existing row returns its stored value.
 * @throws Exception on unexpected client failure
 */
@Test
public void testGetEmptyRow() throws Exception {
  Admin admin = TEST_UTIL.getHBaseAdmin();
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("test")));
  desc.addFamily(new HColumnDescriptor(FAMILY));
  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
  Put put = new Put(ROW_BYTES);
  put.addColumn(FAMILY, COL_QUAL, VAL_BYTES);
  table.put(put);
  Result res = null;
  try {
    res = table.get(new Get(new byte[0]));
    fail();
  } catch (IllegalArgumentException e) {
    // expected: empty row keys are rejected before any RPC is made
  }
  assertTrue(res == null);
  // A row that was never written comes back as an empty, non-null Result.
  res = table.get(new Get(Bytes.toBytes("r1-not-exist")));
  assertTrue(res.isEmpty()); // FIX: drop the redundant "== true" comparison
  res = table.get(new Get(ROW_BYTES));
  assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES));
  table.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Verifies that "hbase.hstore.compaction.min" can be overridden first at the
 * table level and then at the column-family level via Admin.modifyTable, and
 * that each override actually changes compaction behavior (observed through
 * store-file counts after repeated put+flush cycles and compactions). Finally
 * removes the CF-level value and asserts it reads back as null.
 *
 * NOTE(review): heavily timing-dependent — alter-status polling, fixed-sleep
 * compaction waits, and region relocation all rely on exact statement order,
 * so the code below is intentionally left byte-for-byte unchanged. The raw
 * {@code Pair st} declaration mid-method is pre-generics style; flagged but
 * not touched here.
 */
@Test(timeout=60000) public void testAdvancedConfigOverride() throws Exception { TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min",3); TableName tableName=TableName.valueOf("testAdvancedConfigOverride"); Table hTable=TEST_UTIL.createTable(tableName,FAMILY,10); Admin admin=TEST_UTIL.getHBaseAdmin(); ClusterConnection connection=(ClusterConnection)TEST_UTIL.getConnection(); byte[] row=Bytes.toBytes(random.nextInt()); performMultiplePutAndFlush((HBaseAdmin)admin,hTable,row,FAMILY,3,100); try (RegionLocator locator=TEST_UTIL.getConnection().getRegionLocator(tableName)){ HRegionLocation loc=locator.getRegionLocation(row,true); byte[] regionName=loc.getRegionInfo().getRegionName(); AdminProtos.AdminService.BlockingInterface server=connection.getAdmin(loc.getServerName()); assertTrue(ProtobufUtil.getStoreFiles(server,regionName,FAMILY).size() > 1); admin.compact(tableName); for (int i=0; i < 10 * 1000 / 40; ++i) { loc=locator.getRegionLocation(row,true); if (!loc.getRegionInfo().isOffline()) { regionName=loc.getRegionInfo().getRegionName(); server=connection.getAdmin(loc.getServerName()); if (ProtobufUtil.getStoreFiles(server,regionName,FAMILY).size() <= 1) { break; } } Thread.sleep(40); } assertTrue(ProtobufUtil.getStoreFiles(server,regionName,FAMILY).size() <= 1); LOG.info("hbase.hstore.compaction.min should now be 5"); HTableDescriptor htd=new HTableDescriptor(hTable.getTableDescriptor()); htd.setValue("hbase.hstore.compaction.min",String.valueOf(5)); admin.modifyTable(tableName,htd); Pair st; while (null != (st=admin.getAlterStatus(tableName)) && st.getFirst() > 0) { LOG.debug(st.getFirst() + " regions left to update"); Thread.sleep(40); } LOG.info("alter status finished"); performMultiplePutAndFlush((HBaseAdmin)admin,hTable,row,FAMILY,3,10); admin.compact(tableName); Thread.sleep(10 * 1000); loc=locator.getRegionLocation(row,true); regionName=loc.getRegionInfo().getRegionName(); server=connection.getAdmin(loc.getServerName()); int 
sfCount=ProtobufUtil.getStoreFiles(server,regionName,FAMILY).size(); assertTrue(sfCount > 1); LOG.info("hbase.hstore.compaction.min should now be 2"); HColumnDescriptor hcd=new HColumnDescriptor(htd.getFamily(FAMILY)); hcd.setValue("hbase.hstore.compaction.min",String.valueOf(2)); htd.modifyFamily(hcd); admin.modifyTable(tableName,htd); while (null != (st=admin.getAlterStatus(tableName)) && st.getFirst() > 0) { LOG.debug(st.getFirst() + " regions left to update"); Thread.sleep(40); } LOG.info("alter status finished"); admin.compact(tableName); for (int i=0; i < 10 * 1000 / 40; ++i) { loc=locator.getRegionLocation(row,true); regionName=loc.getRegionInfo().getRegionName(); try { server=connection.getAdmin(loc.getServerName()); if (ProtobufUtil.getStoreFiles(server,regionName,FAMILY).size() < sfCount) { break; } } catch ( Exception e) { LOG.debug("Waiting for region to come online: " + regionName); } Thread.sleep(40); } assertTrue(ProtobufUtil.getStoreFiles(server,regionName,FAMILY).size() < sfCount); LOG.info("Removing CF config value"); LOG.info("hbase.hstore.compaction.min should now be 5"); hcd=new HColumnDescriptor(htd.getFamily(FAMILY)); hcd.setValue("hbase.hstore.compaction.min",null); htd.modifyFamily(hcd); admin.modifyTable(tableName,htd); while (null != (st=admin.getAlterStatus(tableName)) && st.getFirst() > 0) { LOG.debug(st.getFirst() + " regions left to update"); Thread.sleep(40); } LOG.info("alter status finished"); assertNull(hTable.getTableDescriptor().getFamily(FAMILY).getValue("hbase.hstore.compaction.min")); } }

InternalCallVerifier EqualityVerifier 
/**
 * existsAll() across a table pre-split into many regions: only rows actually
 * written (including edge keys 0x00 and 0xFF,0xFF) report as existing.
 */
@Test
public void testHTableExistsMethodMultipleRegionsMultipleGets() throws Exception {
  Table table = TEST_UTIL.createTable(
      TableName.valueOf("testHTableExistsMethodMultipleRegionsMultipleGets"),
      new byte[][] { FAMILY }, 1, new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255);
  Put put = new Put(ROW);
  put.addColumn(FAMILY, QUALIFIER, VALUE);
  table.put(put);
  // Parameterized List<Get> instead of the raw ArrayList used before.
  List<Get> gets = new ArrayList<>();
  gets.add(new Get(ANOTHERROW));
  gets.add(new Get(Bytes.add(ROW, new byte[] { 0x00 })));
  gets.add(new Get(ROW));
  gets.add(new Get(Bytes.add(ANOTHERROW, new byte[] { 0x00 })));
  LOG.info("Calling exists");
  boolean[] results = table.existsAll(gets);
  // assertEquals takes (expected, actual); the original had them reversed.
  assertEquals(false, results[0]);
  assertEquals(false, results[1]);
  assertEquals(true, results[2]);
  assertEquals(false, results[3]);
  // Row at the very first split boundary.
  put = new Put(new byte[] { 0x00 });
  put.addColumn(FAMILY, QUALIFIER, VALUE);
  table.put(put);
  gets = new ArrayList<>();
  gets.add(new Get(new byte[] { 0x00 }));
  gets.add(new Get(new byte[] { 0x00, 0x00 }));
  results = table.existsAll(gets);
  assertEquals(true, results[0]);
  assertEquals(false, results[1]);
  // Row at the last split boundary.
  put = new Put(new byte[] { (byte) 0xff, (byte) 0xff });
  put.addColumn(FAMILY, QUALIFIER, VALUE);
  table.put(put);
  gets = new ArrayList<>();
  gets.add(new Get(new byte[] { (byte) 0xff }));
  gets.add(new Get(new byte[] { (byte) 0xff, (byte) 0xff }));
  gets.add(new Get(new byte[] { (byte) 0xff, (byte) 0xff, (byte) 0xff }));
  results = table.existsAll(gets);
  assertEquals(false, results[0]);
  assertEquals(true, results[1]);
  assertEquals(false, results[2]);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * exists() must agree with a subsequent get(): after writing a row, exists()
 * reports true and get() returns the stored value.
 */
@Test
public void testHTableExistsBeforeGet() throws Exception {
  Table table = TEST_UTIL.createTable(
      TableName.valueOf("testHTableExistsBeforeGet"), new byte[][] { FAMILY });
  try {
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
    Get probe = new Get(ROW);
    assertEquals(true, table.exists(probe));
    Result fetched = table.get(probe);
    assertEquals(false, fetched.isEmpty());
    assertTrue(Bytes.equals(VALUE, fetched.getValue(FAMILY, QUALIFIER)));
  } finally {
    table.close();
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * exists() on a single-region table: false before the row is written, true
 * after.
 */
@Test
public void testHTableExistsMethodSingleRegionSingleGet() throws Exception {
  Table table = TEST_UTIL.createTable(
      TableName.valueOf("testHTableExistsMethodSingleRegionSingleGet"), new byte[][] { FAMILY });
  Put put = new Put(ROW);
  put.addColumn(FAMILY, QUALIFIER, VALUE);
  Get get = new Get(ROW);
  // assertEquals takes (expected, actual); the original had them reversed.
  boolean exist = table.exists(get);
  assertEquals(false, exist);
  table.put(put);
  exist = table.exists(get);
  assertEquals(true, exist);
}

InternalCallVerifier EqualityVerifier 
/**
 * exists() on a table pre-split into 255 regions: false before the row is
 * written, true after.
 */
@Test
public void testHTableExistsMethodMultipleRegionsSingleGet() throws Exception {
  Table table = TEST_UTIL.createTable(
      TableName.valueOf("testHTableExistsMethodMultipleRegionsSingleGet"),
      new byte[][] { FAMILY }, 1, new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255);
  Put put = new Put(ROW);
  put.addColumn(FAMILY, QUALIFIER, VALUE);
  Get get = new Get(ROW);
  // assertEquals takes (expected, actual); the original had them reversed.
  boolean exist = table.exists(get);
  assertEquals(false, exist);
  table.put(put);
  exist = table.exists(get);
  assertEquals(true, exist);
}

Class: org.apache.hadoop.hbase.client.TestFromClientSideNoCodec

IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier 
/**
 * Basic read path without a codec: writes one cell per family (value == family
 * name), verifies a Get returns each family's value in family order, then
 * verifies a full Scan sees exactly one row containing all three cells.
 */
@Test
public void testBasics() throws IOException {
  final TableName t = TableName.valueOf("testBasics");
  final byte[][] fs =
      new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") };
  Table ht = TEST_UTIL.createTable(t, fs);
  final byte[] row = Bytes.toBytes("row");
  Put p = new Put(row);
  for (byte[] f : fs) {
    p.addColumn(f, f, f);
  }
  ht.put(p);
  // Each cell's value equals its family name; check them in family order.
  Result r = ht.get(new Get(row));
  int i = 0;
  for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance();) {
    Cell cell = cellScanner.current();
    byte[] f = fs[i++];
    assertTrue(Bytes.toString(f),
        Bytes.equals(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(),
            f, 0, f.length));
  }
  // Single-family Get. (The unused HTableInterface local that used to sit here
  // was removed; the instanceof guard is kept so behavior is unchanged.)
  if (ht instanceof HTableInterface) {
    byte[] f = fs[0];
    Get get = new Get(row);
    get.addFamily(f);
    r = ht.get(get);
    assertTrue(r.toString(), r.containsColumn(f, f));
  }
  // A full scan must return exactly one row with all three cells.
  ResultScanner scanner = ht.getScanner(new Scan());
  int count = 0;
  while ((r = scanner.next()) != null) {
    assertTrue(r.listCells().size() == 3);
    count++;
  }
  assertTrue(count == 1);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * When the default RPC codec is configured as the empty string, the client
 * must resolve it to "no codec" (null or empty).
 */
@Test
public void testNoCodec() {
  Configuration conf = new Configuration();
  conf.set("hbase.client.default.rpc.codec", "");
  String resolved = AbstractRpcClient.getDefaultCodec(conf);
  boolean noCodec = resolved == null || resolved.length() == 0;
  assertTrue(noCodec);
}

Class: org.apache.hadoop.hbase.client.TestGet

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * The Get copy constructor must copy every configurable attribute: row, id,
 * filter, authorizations, ACL, consistency, replica id, isolation level,
 * existence-only flag, time range, max versions, per-CF result limit/offset,
 * and the cache-blocks flag.
 */
@Test
public void TestGetRowFromGetCopyConstructor() throws Exception {
  Get get = new Get(ROW);
  get.setFilter(null);
  get.setAuthorizations(new Authorizations("foo"));
  get.setACL("u", new Permission(Permission.Action.READ));
  get.setConsistency(Consistency.TIMELINE);
  get.setReplicaId(2);
  get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  get.setCheckExistenceOnly(true);
  get.setTimeRange(3, 4);
  get.setMaxVersions(11);
  get.setMaxResultsPerColumnFamily(10);
  get.setRowOffsetPerColumnFamily(11);
  get.setCacheBlocks(true);

  Get copyGet = new Get(get);
  assertEquals(0, Bytes.compareTo(get.getRow(), copyGet.getRow()));
  // The original asserted getId() twice; the duplicate was removed.
  assertEquals(get.getId(), copyGet.getId());
  assertEquals(get.getFilter(), copyGet.getFilter());
  assertTrue(get.getAuthorizations().toString().equals(copyGet.getAuthorizations().toString()));
  assertTrue(Bytes.equals(get.getACL(), copyGet.getACL()));
  assertEquals(get.getConsistency(), copyGet.getConsistency());
  assertEquals(get.getReplicaId(), copyGet.getReplicaId());
  assertEquals(get.getIsolationLevel(), copyGet.getIsolationLevel());
  assertEquals(get.isCheckExistenceOnly(), copyGet.isCheckExistenceOnly());
  assertTrue(get.getTimeRange().equals(copyGet.getTimeRange()));
  assertEquals(get.getMaxVersions(), copyGet.getMaxVersions());
  assertEquals(get.getMaxResultsPerColumnFamily(), copyGet.getMaxResultsPerColumnFamily());
  assertEquals(get.getRowOffsetPerColumnFamily(), copyGet.getRowOffsetPerColumnFamily());
  assertEquals(get.getCacheBlocks(), copyGet.getCacheBlocks());
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises dynamic filter-class loading: with no filter jar on the local
 * class path, deserializing a protobuf Get that references "test.MockFilter"
 * must fail (ClassNotFoundException wrapped in IOException; for the
 * FilterList variant the failure surfaces as InvocationTargetException →
 * DeserializationException). After writing the embedded MockFilter.jar into
 * the "hbase.local.dir"/jars directory, both Gets must deserialize and the
 * filter list must contain the three expected filter instances.
 *
 * NOTE(review): depends on file-system side effects and base64-encoded jar
 * fixtures (PB_GET, PB_GET_WITH_FILTER_LIST, MOCK_FILTER_JAR declared
 * elsewhere in the class); statement order is load-bearing, so the code is
 * left unchanged. The raw {@code List filters} is pre-generics style.
 */
@Test public void testDynamicFilter() throws Exception { Configuration conf=HBaseConfiguration.create(); String localPath=conf.get("hbase.local.dir") + File.separator + "jars"+ File.separator; File jarFile=new File(localPath,"MockFilter.jar"); jarFile.delete(); assertFalse("Should be deleted: " + jarFile.getPath(),jarFile.exists()); ClientProtos.Get getProto1=ClientProtos.Get.parseFrom(Base64.decode(PB_GET)); ClientProtos.Get getProto2=ClientProtos.Get.parseFrom(Base64.decode(PB_GET_WITH_FILTER_LIST)); try { ProtobufUtil.toGet(getProto1); fail("Should not be able to load the filter class"); } catch ( IOException ioe) { assertTrue(ioe.getCause() instanceof ClassNotFoundException); } try { ProtobufUtil.toGet(getProto2); fail("Should not be able to load the filter class"); } catch ( IOException ioe) { assertTrue(ioe.getCause() instanceof InvocationTargetException); InvocationTargetException ite=(InvocationTargetException)ioe.getCause(); assertTrue(ite.getTargetException() instanceof DeserializationException); } FileOutputStream fos=new FileOutputStream(jarFile); fos.write(Base64.decode(MOCK_FILTER_JAR)); fos.close(); Get get1=ProtobufUtil.toGet(getProto1); assertEquals("test.MockFilter",get1.getFilter().getClass().getName()); Get get2=ProtobufUtil.toGet(getProto2); assertTrue(get2.getFilter() instanceof FilterList); List filters=((FilterList)get2.getFilter()).getFilters(); assertEquals(3,filters.size()); assertEquals("test.MockFilter",filters.get(0).getClass().getName()); assertEquals("my.MockFilter",filters.get(1).getClass().getName()); assertTrue(filters.get(2) instanceof KeyOnlyFilter); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises attribute bookkeeping on Get: setting null for an absent attribute
 * is a no-op, setting a value stores it, overwriting replaces it, and setting
 * null removes the attribute; removing the last one empties the map.
 */
@Test
public void testGetAttributes() {
  Get get = new Get(ROW);
  byte[] v1 = Bytes.toBytes("value1");
  byte[] v12 = Bytes.toBytes("value12");
  byte[] v2 = Bytes.toBytes("value2");
  // Fresh instance has no attributes.
  Assert.assertTrue(get.getAttributesMap().isEmpty());
  Assert.assertNull(get.getAttribute("absent"));
  // Clearing an attribute that was never set changes nothing.
  get.setAttribute("absent", null);
  Assert.assertTrue(get.getAttributesMap().isEmpty());
  Assert.assertNull(get.getAttribute("absent"));
  // First write.
  get.setAttribute("attribute1", Bytes.toBytes("value1"));
  Assert.assertTrue(Arrays.equals(v1, get.getAttribute("attribute1")));
  Assert.assertEquals(1, get.getAttributesMap().size());
  Assert.assertTrue(Arrays.equals(v1, get.getAttributesMap().get("attribute1")));
  // Overwrite replaces the stored value.
  get.setAttribute("attribute1", Bytes.toBytes("value12"));
  Assert.assertTrue(Arrays.equals(v12, get.getAttribute("attribute1")));
  Assert.assertEquals(1, get.getAttributesMap().size());
  Assert.assertTrue(Arrays.equals(v12, get.getAttributesMap().get("attribute1")));
  // A second attribute is tracked independently.
  get.setAttribute("attribute2", Bytes.toBytes("value2"));
  Assert.assertTrue(Arrays.equals(v2, get.getAttribute("attribute2")));
  Assert.assertEquals(2, get.getAttributesMap().size());
  Assert.assertTrue(Arrays.equals(v2, get.getAttributesMap().get("attribute2")));
  // Remove attribute2; removing it again is a no-op.
  get.setAttribute("attribute2", null);
  Assert.assertNull(get.getAttribute("attribute2"));
  Assert.assertEquals(1, get.getAttributesMap().size());
  Assert.assertNull(get.getAttributesMap().get("attribute2"));
  get.setAttribute("attribute2", null);
  Assert.assertNull(get.getAttribute("attribute2"));
  Assert.assertEquals(1, get.getAttributesMap().size());
  Assert.assertNull(get.getAttributesMap().get("attribute2"));
  // Removing the last attribute empties the map.
  get.setAttribute("attribute1", null);
  Assert.assertNull(get.getAttribute("attribute1"));
  Assert.assertTrue(get.getAttributesMap().isEmpty());
  Assert.assertNull(get.getAttributesMap().get("attribute1"));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Attributes set on a Get must survive a round trip through the protobuf
 * representation: all three come back with their values, absent keys stay
 * absent, and the map size is unchanged.
 */
@Test
public void testAttributesSerialization() throws IOException {
  Get get = new Get(Bytes.toBytes("row"));
  for (int i = 1; i <= 3; i++) {
    get.setAttribute("attribute" + i, Bytes.toBytes("value" + i));
  }
  ClientProtos.Get getProto = ProtobufUtil.toGet(get);
  Get roundTripped = ProtobufUtil.toGet(getProto);
  Assert.assertNull(roundTripped.getAttribute("absent"));
  for (int i = 1; i <= 3; i++) {
    Assert.assertTrue(
        Arrays.equals(Bytes.toBytes("value" + i), roundTripped.getAttribute("attribute" + i)));
  }
  Assert.assertEquals(3, roundTripped.getAttributesMap().size());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Adding a column with a null qualifier must still register exactly one
 * qualifier entry (the null marker) for that family.
 */
@Test
public void testNullQualifier() {
  Get get = new Get(ROW);
  byte[] family = Bytes.toBytes("family");
  get.addColumn(family, null);
  // Parameterized Set<byte[]> instead of the raw Set used before.
  Set<byte[]> qualifiers = get.getFamilyMap().get(family);
  Assert.assertEquals(1, qualifiers.size());
}

Class: org.apache.hadoop.hbase.client.TestHCM

InternalCallVerifier BooleanVerifier 
/**
 * Closing a Table obtained from an externally managed Connection must not shut
 * down the connection or its thread pool; only closing the Connection itself
 * shuts the pool down.
 */
@Test
public void testConnectionManagement() throws Exception {
  Table managedTable = TEST_UTIL.createTable(TABLE_NAME1, FAM_NAM);
  Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
  HTable table = (HTable) conn.getTable(TABLE_NAME1);
  table.close();
  // Closing the table leaves the connection and its pool alive.
  assertFalse(conn.isClosed());
  assertFalse(table.getPool().isShutdown());
  table = (HTable) conn.getTable(TABLE_NAME1);
  table.close();
  assertFalse(table.getPool().isShutdown());
  // Closing the connection shuts the pool down.
  conn.close();
  assertTrue(table.getPool().isShutdown());
  managedTable.close();
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier IgnoredMethod 
/**
 * Exercises ServerErrorTracker back-off arithmetic with an injected manual
 * clock: each reported error moves a server up the RETRY_BACKOFF schedule,
 * different servers are tracked independently, elapsed time is subtracted
 * from the computed pause, and canTryMore() flips to false once the overall
 * time budget is exhausted.
 *
 * NOTE(review): ignored because it hard-codes the historical RETRY_BACKOFF
 * multipliers (1x, 5x, 10x...), which have since changed. The jittered
 * comparisons and the injected-clock sequencing make the statement order
 * load-bearing, so the code is left unchanged.
 */
@Ignore("Test presumes RETRY_BACKOFF will never change; it has") @Test public void testErrorBackoffTimeCalculation() throws Exception { final long ANY_PAUSE=100; ServerName location=ServerName.valueOf("127.0.0.1",1,0); ServerName diffLocation=ServerName.valueOf("127.0.0.1",2,0); ManualEnvironmentEdge timeMachine=new ManualEnvironmentEdge(); EnvironmentEdgeManager.injectEdge(timeMachine); try { long timeBase=timeMachine.currentTime(); long largeAmountOfTime=ANY_PAUSE * 1000; ConnectionImplementation.ServerErrorTracker tracker=new ConnectionImplementation.ServerErrorTracker(largeAmountOfTime,100); assertEquals(0,tracker.calculateBackoffTime(location,ANY_PAUSE)); tracker.reportServerError(location); assertEqualsWithJitter(ANY_PAUSE,tracker.calculateBackoffTime(location,ANY_PAUSE)); tracker.reportServerError(location); tracker.reportServerError(location); tracker.reportServerError(location); assertEqualsWithJitter(ANY_PAUSE * 5,tracker.calculateBackoffTime(location,ANY_PAUSE)); assertEquals(0,tracker.calculateBackoffTime(diffLocation,ANY_PAUSE)); tracker.reportServerError(diffLocation); assertEqualsWithJitter(ANY_PAUSE,tracker.calculateBackoffTime(diffLocation,ANY_PAUSE)); assertEqualsWithJitter(ANY_PAUSE * 10,tracker.calculateBackoffTime(location,ANY_PAUSE * 2)); long timeShift=(long)(ANY_PAUSE * 0.5); timeMachine.setValue(timeBase + timeShift); assertEqualsWithJitter((ANY_PAUSE * 5) - timeShift,tracker.calculateBackoffTime(location,ANY_PAUSE),ANY_PAUSE * 2); timeMachine.setValue(timeBase + ANY_PAUSE * 100); assertEquals(0,tracker.calculateBackoffTime(location,ANY_PAUSE)); long timeLeft=(long)(ANY_PAUSE * 0.5); timeMachine.setValue(timeBase + largeAmountOfTime - timeLeft); assertTrue(tracker.canTryMore(1)); tracker.reportServerError(location); assertEquals(timeLeft,tracker.calculateBackoffTime(location,ANY_PAUSE)); timeMachine.setValue(timeBase + largeAmountOfTime); assertFalse(tracker.canTryMore(1)); } finally { EnvironmentEdgeManager.reset(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Thread-pool ownership rules for connections and tables:
 * - a Connection created with an explicit pool uses that pool for its tables;
 * - a table created with an explicit pool uses it without forcing the
 *   connection to create its internal pool;
 * - a table created without a pool lazily creates the connection's internal
 *   pool, which is shared by later tables and shut down when the connection
 *   closes;
 * - closing a connection must NOT shut down an externally supplied pool.
 *
 * NOTE(review): the assertions depend on the exact order of table creation
 * and closing (pool identity and lazy creation), so the code is left
 * unchanged.
 */
@Test public void testClusterConnection() throws IOException { ThreadPoolExecutor otherPool=new ThreadPoolExecutor(1,1,5,TimeUnit.SECONDS,new SynchronousQueue(),Threads.newDaemonThreadFactory("test-hcm")); Connection con1=ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); Connection con2=ConnectionFactory.createConnection(TEST_UTIL.getConfiguration(),otherPool); assertTrue(otherPool == ((ConnectionImplementation)con2).getCurrentBatchPool()); TableName tableName=TableName.valueOf("testClusterConnection"); TEST_UTIL.createTable(tableName,FAM_NAM).close(); HTable t=(HTable)con1.getTable(tableName,otherPool); assertNull("Internal Thread pool should be null",((ConnectionImplementation)con1).getCurrentBatchPool()); assertTrue(otherPool == t.getPool()); t.close(); t=(HTable)con2.getTable(tableName); assertTrue(otherPool == t.getPool()); t.close(); t=(HTable)con2.getTable(tableName); assertTrue(otherPool == t.getPool()); t.close(); t=(HTable)con2.getTable(tableName); assertTrue(otherPool == t.getPool()); t.close(); t=(HTable)con1.getTable(tableName); ExecutorService pool=((ConnectionImplementation)con1).getCurrentBatchPool(); assertNotNull("An internal Thread pool should have been created",pool); assertTrue(t.getPool() == pool); t.close(); t=(HTable)con1.getTable(tableName); assertTrue(t.getPool() == pool); t.close(); con1.close(); assertTrue(pool.isShutdown()); con2.close(); assertFalse(otherPool.isShutdown()); otherPool.shutdownNow(); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Test that when we delete a location using the first row of a region * that we really delete it. * @throws Exception */ @Test public void testRegionCaching() throws Exception { TEST_UTIL.createMultiRegionTable(TABLE_NAME,FAM_NAM).close(); Configuration conf=new Configuration(TEST_UTIL.getConfiguration()); conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,0); Connection connection=ConnectionFactory.createConnection(conf); final Table table=connection.getTable(TABLE_NAME); TEST_UTIL.waitUntilAllRegionsAssigned(table.getName()); Put put=new Put(ROW); put.addColumn(FAM_NAM,ROW,ROW); table.put(put); ConnectionImplementation conn=(ConnectionImplementation)connection; assertNotNull(conn.getCachedLocation(TABLE_NAME,ROW)); final int nextPort=conn.getCachedLocation(TABLE_NAME,ROW).getRegionLocation().getPort() + 1; HRegionLocation loc=conn.getCachedLocation(TABLE_NAME,ROW).getRegionLocation(); conn.updateCachedLocation(loc.getRegionInfo(),loc.getServerName(),ServerName.valueOf("127.0.0.1",nextPort,HConstants.LATEST_TIMESTAMP),HConstants.LATEST_TIMESTAMP); Assert.assertEquals(conn.getCachedLocation(TABLE_NAME,ROW).getRegionLocation().getPort(),nextPort); conn.clearRegionCache(TABLE_NAME,ROW.clone()); RegionLocations rl=conn.getCachedLocation(TABLE_NAME,ROW); assertNull("What is this location?? 
" + rl,rl); conn.clearRegionCache(TABLE_NAME); Assert.assertEquals(0,conn.getNumberOfCachedRegionLocations(TABLE_NAME)); Put put2=new Put(ROW); put2.addColumn(FAM_NAM,ROW,ROW); table.put(put2); assertNotNull(conn.getCachedLocation(TABLE_NAME,ROW)); assertNotNull(conn.getCachedLocation(TableName.valueOf(TABLE_NAME.getName()),ROW.clone())); TEST_UTIL.getHBaseAdmin().setBalancerRunning(false,false); HMaster master=TEST_UTIL.getMiniHBaseCluster().getMaster(); while (master.getAssignmentManager().getRegionStates().isRegionsInTransition()) { Thread.sleep(1); } HRegionLocation toMove=conn.getCachedLocation(TABLE_NAME,ROW).getRegionLocation(); byte[] regionName=toMove.getRegionInfo().getRegionName(); byte[] encodedRegionNameBytes=toMove.getRegionInfo().getEncodedNameAsBytes(); int curServerId=TEST_UTIL.getHBaseCluster().getServerWith(regionName); int destServerId=(curServerId == 0 ? 1 : 0); HRegionServer curServer=TEST_UTIL.getHBaseCluster().getRegionServer(curServerId); HRegionServer destServer=TEST_UTIL.getHBaseCluster().getRegionServer(destServerId); ServerName destServerName=destServer.getServerName(); Assert.assertTrue(curServer != destServer); Assert.assertFalse(curServer.getServerName().equals(destServer.getServerName())); Assert.assertFalse(toMove.getPort() == destServerName.getPort()); Assert.assertNotNull(curServer.getOnlineRegion(regionName)); Assert.assertNull(destServer.getOnlineRegion(regionName)); Assert.assertFalse(TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().isRegionsInTransition()); LOG.info("Move starting region=" + toMove.getRegionInfo().getRegionNameAsString()); TEST_UTIL.getHBaseAdmin().move(toMove.getRegionInfo().getEncodedNameAsBytes(),destServerName.getServerName().getBytes()); while (destServer.getOnlineRegion(regionName) == null || destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) || curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) || 
master.getAssignmentManager().getRegionStates().isRegionsInTransition()) { Thread.sleep(1); } LOG.info("Move finished for region=" + toMove.getRegionInfo().getRegionNameAsString()); Assert.assertNull(curServer.getOnlineRegion(regionName)); Assert.assertNotNull(destServer.getOnlineRegion(regionName)); Assert.assertFalse(destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes)); Assert.assertFalse(curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes)); Assert.assertFalse(conn.getCachedLocation(TABLE_NAME,ROW).getRegionLocation().getPort() == destServerName.getPort()); LOG.info("Put starting"); Put put3=new Put(ROW); put3.addColumn(FAM_NAM,ROW,ROW); try { table.put(put3); Assert.fail("Unreachable point"); } catch ( RetriesExhaustedWithDetailsException e) { LOG.info("Put done, exception caught: " + e.getClass()); Assert.assertEquals(1,e.getNumExceptions()); Assert.assertEquals(1,e.getCauses().size()); Assert.assertArrayEquals(e.getRow(0).getRow(),ROW); Throwable cause=ClientExceptionsUtil.findException(e.getCause(0)); Assert.assertNotNull(cause); Assert.assertTrue(cause instanceof RegionMovedException); } Assert.assertNotNull("Cached connection is null",conn.getCachedLocation(TABLE_NAME,ROW)); Assert.assertEquals("Previous server was " + curServer.getServerName().getHostAndPort(),destServerName.getPort(),conn.getCachedLocation(TABLE_NAME,ROW).getRegionLocation().getPort()); Assert.assertFalse(destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes)); Assert.assertFalse(curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes)); LOG.info("Move starting region=" + toMove.getRegionInfo().getRegionNameAsString()); TEST_UTIL.getHBaseAdmin().move(toMove.getRegionInfo().getEncodedNameAsBytes(),curServer.getServerName().getServerName().getBytes()); while (curServer.getOnlineRegion(regionName) == null || destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) || 
curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) { Thread.sleep(1); } Assert.assertNotNull(curServer.getOnlineRegion(regionName)); Assert.assertNull(destServer.getOnlineRegion(regionName)); LOG.info("Move finished for region=" + toMove.getRegionInfo().getRegionNameAsString()); Assert.assertFalse(conn.getCachedLocation(TABLE_NAME,ROW).getRegionLocation().getPort() == curServer.getServerName().getPort()); Scan sc=new Scan(); sc.setStopRow(ROW); sc.setStartRow(ROW); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,1); try { ResultScanner rs=table.getScanner(sc); while (rs.next() != null) { } Assert.fail("Unreachable point"); } catch ( RetriesExhaustedException e) { LOG.info("Scan done, expected exception caught: " + e.getClass()); } Assert.assertNotNull(conn.getCachedLocation(TABLE_NAME,ROW)); Assert.assertEquals("Previous server was " + destServer.getServerName().getHostAndPort(),curServer.getServerName().getPort(),conn.getCachedLocation(TABLE_NAME,ROW).getRegionLocation().getPort()); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); table.close(); connection.close(); }

InternalCallVerifier BooleanVerifier 
/**
 * getAdmin() must hand back an Admin wired to the same Connection and the same
 * Configuration it was created from.
 *
 * @throws IOException if the admin cannot be constructed
 */
@Test
public void testAdminFactory() throws IOException {
  Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
  Admin admin = connection.getAdmin();
  assertTrue(admin.getConnection() == connection);
  assertTrue(admin.getConfiguration() == TEST_UTIL.getConfiguration());
  connection.close();
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Stale cache updates must not override newer cached values: updates reported
 * by the region's hosting server always win (regardless of seqNum), while
 * updates from another source only apply when they carry a higher seqNum.
 */
@Test
public void testCacheSeqNums() throws Exception {
  Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME2, FAM_NAM);
  Put put = new Put(ROW);
  put.addColumn(FAM_NAM, ROW, ROW);
  table.put(put);
  ConnectionImplementation conn = (ConnectionImplementation) TEST_UTIL.getConnection();
  HRegionLocation location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
  assertNotNull(location);
  // A reporting source that is not the server currently hosting the region.
  ServerName staleSource = ServerName.valueOf(location.getHostname(), location.getPort() - 1, 0L);
  // 1) Update from the hosting server with an older seqNum: applied.
  int nextPort = location.getPort() + 1;
  conn.updateCachedLocation(location.getRegionInfo(), location.getServerName(),
      ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
  location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
  Assert.assertEquals(nextPort, location.getPort());
  // 2) Again from the hosting server with an older seqNum: still applied.
  nextPort = location.getPort() + 1;
  conn.updateCachedLocation(location.getRegionInfo(), location.getServerName(),
      ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
  location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
  Assert.assertEquals(nextPort, location.getPort());
  // 3) From another source with a newer seqNum: applied.
  nextPort = location.getPort() + 1;
  conn.updateCachedLocation(location.getRegionInfo(), staleSource,
      ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() + 1);
  location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
  Assert.assertEquals(nextPort, location.getPort());
  // 4) From another source with an older seqNum: ignored.
  nextPort = location.getPort() + 1;
  conn.updateCachedLocation(location.getRegionInfo(), staleSource,
      ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
  location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
  Assert.assertEquals(nextPort - 1, location.getPort());
  table.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Two connections created with a distinct client instance id are independent
 * objects: closing one leaves the other open.
 */
@Test
public void testClosing() throws Exception {
  Configuration configuration = new Configuration(TEST_UTIL.getConfiguration());
  configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(_randy.nextInt()));
  Connection first = ConnectionFactory.createConnection(configuration);
  Connection second = ConnectionFactory.createConnection(configuration);
  assertTrue(first != second);
  first.close();
  assertTrue(first.isClosed());
  assertFalse(second.isClosed());
  second.close();
  assertTrue(second.isClosed());
}

InternalCallVerifier NullVerifier ExceptionVerifier HybridVerifier IgnoredMethod 
/**
 * Starts an extra region server, pins a region onto it, writes a row, then
 * aborts that server and waits until both the master's dead-server list and
 * the client's cluster-status listener have registered the death. Finally,
 * asking the connection for a client stub to the dead server must throw
 * RegionServerStoppedException (the {@code expected} of the @Test).
 *
 * NOTE(review): ignored in the suite; involves region moves, server abort,
 * and two 40s wait predicates whose ordering is essential, so the code is
 * left unchanged.
 */
@Ignore @Test(expected=RegionServerStoppedException.class) public void testClusterStatus() throws Exception { TableName tn=TableName.valueOf("testClusterStatus"); byte[] cf="cf".getBytes(); byte[] rk="rk1".getBytes(); JVMClusterUtil.RegionServerThread rs=TEST_UTIL.getHBaseCluster().startRegionServer(); rs.waitForServerOnline(); final ServerName sn=rs.getRegionServer().getServerName(); Table t=TEST_UTIL.createTable(tn,cf); TEST_UTIL.waitTableAvailable(tn); while (TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates().isRegionsInTransition()) { Thread.sleep(1); } final ConnectionImplementation hci=(ConnectionImplementation)TEST_UTIL.getConnection(); try (RegionLocator l=TEST_UTIL.getConnection().getRegionLocator(tn)){ while (l.getRegionLocation(rk).getPort() != sn.getPort()) { TEST_UTIL.getHBaseAdmin().move(l.getRegionLocation(rk).getRegionInfo().getEncodedNameAsBytes(),Bytes.toBytes(sn.toString())); while (TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates().isRegionsInTransition()) { Thread.sleep(1); } hci.clearRegionCache(tn); } Assert.assertNotNull(hci.clusterStatusListener); TEST_UTIL.assertRegionOnServer(l.getRegionLocation(rk).getRegionInfo(),sn,20000); } Put p1=new Put(rk); p1.addColumn(cf,"qual".getBytes(),"val".getBytes()); t.put(p1); rs.getRegionServer().abort("I'm dead"); TEST_UTIL.waitFor(40000,1000,true,new Waiter.Predicate(){ @Override public boolean evaluate() throws Exception { return TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getDeadServers().isDeadServer(sn); } } ); TEST_UTIL.waitFor(40000,1000,true,new Waiter.Predicate(){ @Override public boolean evaluate() throws Exception { return hci.clusterStatusListener.isDeadServer(sn); } } ); t.close(); hci.getClient(sn); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * ConnectionFactory.createConnection must return a new Connection instance on
 * every call while sharing the Configuration object it was given.
 */
@Test
public void testCreateConnection() throws Exception {
  Configuration configuration = TEST_UTIL.getConfiguration();
  Connection first = ConnectionFactory.createConnection(configuration);
  Connection second = ConnectionFactory.createConnection(configuration);
  assertTrue(first != second);
  assertTrue(first.getConfiguration() == second.getConfiguration());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A client configured with only the ZooKeeper quorum and client port must be
 * able to connect; everything else (e.g. the master address) is discovered
 * through ZooKeeper.
 */
@Test
public void testConnection() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HConstants.ZOOKEEPER_QUORUM,
      TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_QUORUM));
  conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT));
  HConnection conn = (HConnection) ConnectionFactory.createConnection(conf);
  assertTrue(conn.isMasterRunning());
  conn.close();
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Test that the connection to a dead server is cut immediately when the
 * death notification arrives: a Get blocked server-side by BlockingFilter is
 * interrupted when a helper thread (released via syncBlockingFilter) reports
 * the hosting server dead through the cluster-status listener; the resulting
 * IOException must arrive before the RPC timeout (i.e. must not be a
 * SocketTimeoutException).
 *
 * NOTE(review): relies on cross-thread handshaking (wait/notify on
 * syncBlockingFilter) and on retries=0 / 30s RPC timeout settings; the
 * ordering is essential, so the code is left unchanged.
 */
@Test public void testConnectionCut() throws Exception { TableName tableName=TableName.valueOf("HCM-testConnectionCut"); TEST_UTIL.createTable(tableName,FAM_NAM).close(); boolean previousBalance=TEST_UTIL.getHBaseAdmin().setBalancerRunning(false,true); Configuration c2=new Configuration(TEST_UTIL.getConfiguration()); c2.set(HConstants.HBASE_CLIENT_INSTANCE_ID,String.valueOf(-1)); c2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,0); c2.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY,30 * 1000); final Connection connection=ConnectionFactory.createConnection(c2); final Table table=connection.getTable(tableName); Put p=new Put(FAM_NAM); p.addColumn(FAM_NAM,FAM_NAM,FAM_NAM); table.put(p); final ConnectionImplementation hci=(ConnectionImplementation)connection; final HRegionLocation loc; try (RegionLocator rl=connection.getRegionLocator(tableName)){ loc=rl.getRegionLocation(FAM_NAM); } Get get=new Get(FAM_NAM); Assert.assertNotNull(table.get(get)); get=new Get(FAM_NAM); get.setFilter(new BlockingFilter()); Thread t=new Thread(){ @Override public void run(){ synchronized (syncBlockingFilter) { try { syncBlockingFilter.wait(); } catch ( InterruptedException e) { throw new RuntimeException(e); } } hci.clusterStatusListener.deadServerHandler.newDead(loc.getServerName()); } } ; t.start(); try { table.get(get); Assert.fail(); } catch ( IOException expected) { LOG.debug("Received: " + expected); Assert.assertFalse(expected instanceof SocketTimeoutException); Assert.assertFalse(syncBlockingFilter.get()); } finally { syncBlockingFilter.set(true); t.join(); TEST_UTIL.getHBaseAdmin().setBalancerRunning(previousBalance,true); } table.close(); connection.close(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a multi/batch operation succeeds after one of its target regions has been
 * moved to another region server while the client still holds a stale location cache: the
 * first try hits the old server and the retry must relocate and succeed.
 */
@Test public void testMulti() throws Exception {
  Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME3, FAM_NAM);
  try {
    ConnectionImplementation conn = (ConnectionImplementation) TEST_UTIL.getConnection();
    // Start from an empty region-location cache.
    conn.clearRegionCache(TABLE_NAME3);
    Assert.assertEquals(0, conn.getNumberOfCachedRegionLocations(TABLE_NAME3));
    // Stop the balancer so the region we move stays where we put it.
    TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, false);
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    while (master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
      Thread.sleep(1);
    }
    Put put = new Put(ROW_X);
    put.addColumn(FAM_NAM, ROW_X, ROW_X);
    table.put(put);
    // The region hosting ROW_X is the one we will move.
    HRegionLocation toMove = conn.getCachedLocation(TABLE_NAME3, ROW_X).getRegionLocation();
    byte[] regionName = toMove.getRegionInfo().getRegionName();
    byte[] encodedRegionNameBytes = toMove.getRegionInfo().getEncodedNameAsBytes();
    int curServerId = TEST_UTIL.getHBaseCluster().getServerWith(regionName);
    int destServerId = (curServerId == 0 ? 1 : 0);
    HRegionServer curServer = TEST_UTIL.getHBaseCluster().getRegionServer(curServerId);
    HRegionServer destServer = TEST_UTIL.getHBaseCluster().getRegionServer(destServerId);
    ServerName destServerName = destServer.getServerName();
    // Find a row in a DIFFERENT region on the same server, so the batch below
    // spans one moved and one unmoved region.
    // List<Region>: typed list is required for the enhanced-for below.
    List<Region> regions = curServer.getOnlineRegions(TABLE_NAME3);
    byte[] otherRow = null;
    for (Region region : regions) {
      if (!region.getRegionInfo().getEncodedName().equals(toMove.getRegionInfo().getEncodedName())
          && Bytes.BYTES_COMPARATOR.compare(region.getRegionInfo().getStartKey(), ROW_X) < 0) {
        otherRow = region.getRegionInfo().getStartKey();
        break;
      }
    }
    assertNotNull(otherRow);
    // A region's start key may be the empty byte[]; use a plain row in that case.
    if (otherRow.length <= 0) otherRow = Bytes.toBytes("aaa");
    Put put2 = new Put(otherRow);
    put2.addColumn(FAM_NAM, otherRow, otherRow);
    table.put(put2);
    // Sanity checks before the move.
    Assert.assertTrue(curServer != destServer);
    Assert.assertNotEquals(curServer.getServerName(), destServer.getServerName());
    Assert.assertNotEquals(toMove.getPort(), destServerName.getPort());
    Assert.assertNotNull(curServer.getOnlineRegion(regionName));
    Assert.assertNull(destServer.getOnlineRegion(regionName));
    Assert.assertFalse(TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
        .getRegionStates().isRegionsInTransition());
    LOG.info("Move starting region=" + toMove.getRegionInfo().getRegionNameAsString());
    // Bytes.toBytes instead of String.getBytes(): never depend on the platform charset.
    TEST_UTIL.getHBaseAdmin().move(toMove.getRegionInfo().getEncodedNameAsBytes(),
        Bytes.toBytes(destServerName.getServerName()));
    // Wait until the region is fully online on the destination and out of transition
    // everywhere.
    while (destServer.getOnlineRegion(regionName) == null
        || destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes)
        || curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes)
        || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
      Thread.sleep(1);
    }
    LOG.info("Move finished for region=" + toMove.getRegionInfo().getRegionNameAsString());
    Assert.assertNull(curServer.getOnlineRegion(regionName));
    Assert.assertNotNull(destServer.getOnlineRegion(regionName));
    Assert.assertFalse(destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes));
    Assert.assertFalse(curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes));
    // The client cache is intentionally stale at this point.
    Assert.assertFalse(conn.getCachedLocation(TABLE_NAME3, ROW_X).getRegionLocation().getPort()
        == destServerName.getPort());
    // Two tries: the first hits the stale location, the retry must succeed.
    final int prevNumRetriesVal = setNumTries(conn, 2);
    Put put3 = new Put(ROW_X);
    put3.addColumn(FAM_NAM, ROW_X, ROW_X);
    Put put4 = new Put(otherRow);
    put4.addColumn(FAM_NAM, otherRow, otherRow);
    ArrayList<Put> actions = Lists.newArrayList(put4, put3);
    table.batch(actions, null);
    setNumTries(conn, prevNumRetriesVal);
  } finally {
    table.close();
  }
}

Class: org.apache.hadoop.hbase.client.TestHTableMultiplexer

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Exercises {@link HTableMultiplexer}: single async puts against two tables (one row per
 * region start key), then a multi-put against one table (one row per region end key),
 * reading every row back to confirm delivery.
 */
@Test public void testHTableMultiplexer() throws Exception {
  TableName TABLE_1 = TableName.valueOf("testHTableMultiplexer_1");
  TableName TABLE_2 = TableName.valueOf("testHTableMultiplexer_2");
  final int NUM_REGIONS = 10;
  final int VERSION = 3;
  List<Put> failedPuts;  // typed, not raw: multiplexer.put(table, List<Put>) returns List<Put>
  boolean success;
  HTableMultiplexer multiplexer =
      new HTableMultiplexer(TEST_UTIL.getConfiguration(), PER_REGIONSERVER_QUEUE_SIZE);
  Table htable1 = TEST_UTIL.createTable(TABLE_1, new byte[][] { FAMILY }, VERSION,
      Bytes.toBytes("aaaaa"), Bytes.toBytes("zzzzz"), NUM_REGIONS);
  Table htable2 = TEST_UTIL.createTable(TABLE_2, new byte[][] { FAMILY }, VERSION,
      Bytes.toBytes("aaaaa"), Bytes.toBytes("zzzzz"), NUM_REGIONS);
  TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_1);
  TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_2);
  try (RegionLocator rl = TEST_UTIL.getConnection().getRegionLocator(TABLE_1)) {
    byte[][] startRows = rl.getStartKeys();
    byte[][] endRows = rl.getEndKeys();
    // One single-put per region (skipping the empty first start key) into both tables.
    for (int i = 0; i < NUM_REGIONS; i++) {
      byte[] row = startRows[i];
      if (row == null || row.length <= 0) continue;
      Put put = new Put(row).addColumn(FAMILY, QUALIFIER, VALUE1);
      success = multiplexer.put(TABLE_1, put);
      // Consistent failure message (the old "multiplexer.put returns" said nothing).
      assertTrue("multiplexer.put failed", success);
      put = new Put(row).addColumn(FAMILY, QUALIFIER, VALUE1);
      success = multiplexer.put(TABLE_2, put);
      assertTrue("multiplexer.put failed", success);
      LOG.info("Put for " + Bytes.toStringBinary(startRows[i]) + " @ iteration " + (i + 1));
      // The multiplexer is asynchronous; wait until each row becomes readable.
      checkExistence(htable1, startRows[i], FAMILY, QUALIFIER);
      checkExistence(htable2, startRows[i], FAMILY, QUALIFIER);
    }
    // Multi-put: one row per region end key (skipping the empty last end key).
    List<Put> multiput = new ArrayList<Put>();
    for (int i = 0; i < NUM_REGIONS; i++) {
      byte[] row = endRows[i];
      if (row == null || row.length <= 0) continue;
      Put put = new Put(row);
      put.addColumn(FAMILY, QUALIFIER, VALUE2);
      multiput.add(put);
    }
    failedPuts = multiplexer.put(TABLE_1, multiput);
    assertTrue(failedPuts == null);
    // Poll (100ms x up to 50 tries per row) until every multi-put row shows VALUE2.
    for (int i = 0; i < NUM_REGIONS; i++) {
      byte[] row = endRows[i];
      if (row == null || row.length <= 0) continue;
      Get get = new Get(row);
      get.addColumn(FAMILY, QUALIFIER);
      Result r;
      int nbTry = 0;
      do {
        assertTrue(nbTry++ < 50);
        Thread.sleep(100);
        r = htable1.get(get);
      } while (r == null || r.getValue(FAMILY, QUALIFIER) == null
          || Bytes.compareTo(VALUE2, r.getValue(FAMILY, QUALIFIER)) != 0);
    }
  }
}

Class: org.apache.hadoop.hbase.client.TestHTableMultiplexerFlushCache

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * After the region server hosting a row is stopped and the region is reassigned,
 * a subsequent multiplexer put to the same row must still succeed and become readable.
 */
@Test public void testOnRegionChange() throws Exception {
  TableName tableName = TableName.valueOf("testOnRegionChange");
  final int NUM_REGIONS = 10;
  Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 3,
      Bytes.toBytes("aaaaa"), Bytes.toBytes("zzzzz"), NUM_REGIONS);
  HTableMultiplexer mux =
      new HTableMultiplexer(TEST_UTIL.getConfiguration(), PER_REGIONSERVER_QUEUE_SIZE);
  try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    byte[][] startKeys = locator.getStartKeys();
    byte[] row = startKeys[1];
    assertTrue("2nd region should not start with empty row", row != null && row.length > 0);
    // Write through the multiplexer and wait for the row to appear.
    Put first = new Put(row).addColumn(FAMILY, QUALIFIER1, VALUE1);
    assertTrue("multiplexer.put returns", mux.put(tableName, first));
    checkExistence(table, row, FAMILY, QUALIFIER1, VALUE1);
    // Kill the server currently hosting the row and wait for reassignment.
    HRegionLocation location = locator.getRegionLocation(row);
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    cluster.stopRegionServer(location.getServerName());
    TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
    // The multiplexer must route the next put to the region's new server.
    Put second = new Put(row).addColumn(FAMILY, QUALIFIER2, VALUE2);
    assertTrue("multiplexer.put returns", mux.put(tableName, second));
    checkExistence(table, row, FAMILY, QUALIFIER2, VALUE2);
  }
}

Class: org.apache.hadoop.hbase.client.TestIncrementsFromClientSide

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Adding the same column twice (amount 1 each) to one {@link Increment} must result in a
 * single +1 per qualifier, as the assertions below pin: counts go 1 then 2, not 2 then 4.
 */
@Test public void testIncrementOnSameColumn() throws Exception {
  LOG.info("Starting " + this.name.getMethodName());
  final byte[] TABLENAME = Bytes.toBytes(filterStringSoTableNameSafe(this.name.getMethodName()));
  Table table = TEST_UTIL.createTable(TableName.valueOf(TABLENAME), FAMILY);
  byte[][] QUALIFIERS =
      new byte[][] { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };
  // First round: every qualifier added twice with amount 1.
  Increment increment = new Increment(ROW);
  for (byte[] qualifier : QUALIFIERS) {
    increment.addColumn(FAMILY, qualifier, 1);
    increment.addColumn(FAMILY, qualifier, 1);
  }
  table.increment(increment);
  Get get = new Get(ROW);
  Result result = table.get(get);
  Cell[] cells = result.rawCells();
  assertEquals(3, cells.length);
  // Each counter is 1, not 2.
  assertIncrementKey(cells[0], ROW, FAMILY, QUALIFIERS[0], 1);
  assertIncrementKey(cells[1], ROW, FAMILY, QUALIFIERS[1], 1);
  assertIncrementKey(cells[2], ROW, FAMILY, QUALIFIERS[2], 1);
  // An identical second round advances every counter to 2.
  increment = new Increment(ROW);
  for (byte[] qualifier : QUALIFIERS) {
    increment.addColumn(FAMILY, qualifier, 1);
    increment.addColumn(FAMILY, qualifier, 1);
  }
  table.increment(increment);
  result = table.get(get);
  cells = result.rawCells();
  assertEquals(3, cells.length);
  assertIncrementKey(cells[0], ROW, FAMILY, QUALIFIERS[0], 2);
  assertIncrementKey(cells[1], ROW, FAMILY, QUALIFIERS[1], 2);
  assertIncrementKey(cells[2], ROW, FAMILY, QUALIFIERS[2], 2);
  table.close();
}

InternalCallVerifier EqualityVerifier 
/**
 * Exercises incrementColumnValue plus Increment across qualifiers and rows:
 * merging with pre-existing counters, per-qualifier amounts, repeated
 * increments, and an amount of 0 (which must leave counters unchanged).
 */
@Test public void testIncrement() throws Exception {
  LOG.info("Starting " + this.name.getMethodName());
  final TableName TABLENAME=TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
  Table ht=TEST_UTIL.createTable(TABLENAME,FAMILY);
  byte[][] ROWS=new byte[][]{Bytes.toBytes("a"),Bytes.toBytes("b"),Bytes.toBytes("c"),Bytes.toBytes("d"),Bytes.toBytes("e"),Bytes.toBytes("f"),Bytes.toBytes("g"),Bytes.toBytes("h"),Bytes.toBytes("i")};
  byte[][] QUALIFIERS=new byte[][]{Bytes.toBytes("a"),Bytes.toBytes("b"),Bytes.toBytes("c"),Bytes.toBytes("d"),Bytes.toBytes("e"),Bytes.toBytes("f"),Bytes.toBytes("g"),Bytes.toBytes("h"),Bytes.toBytes("i")};
  // Single-column increments on ROW: counters become 1, 2, 3, 4.
  ht.incrementColumnValue(ROW,FAMILY,QUALIFIERS[0],1);
  ht.incrementColumnValue(ROW,FAMILY,QUALIFIERS[1],2);
  ht.incrementColumnValue(ROW,FAMILY,QUALIFIERS[2],3);
  ht.incrementColumnValue(ROW,FAMILY,QUALIFIERS[3],4);
  // One Increment touching qualifiers 1 and 3 (existing) and 4 (new) by +1 each.
  Increment inc=new Increment(ROW);
  inc.addColumn(FAMILY,QUALIFIERS[1],1);
  inc.addColumn(FAMILY,QUALIFIERS[3],1);
  inc.addColumn(FAMILY,QUALIFIERS[4],1);
  ht.increment(inc);
  // Merged values: 1, 2+1, 3, 4+1, and 1 for the newly created qualifier 4.
  Get get=new Get(ROW);
  Result r=ht.get(get);
  Cell[] kvs=r.rawCells();
  assertEquals(5,kvs.length);
  assertIncrementKey(kvs[0],ROW,FAMILY,QUALIFIERS[0],1);
  assertIncrementKey(kvs[1],ROW,FAMILY,QUALIFIERS[1],3);
  assertIncrementKey(kvs[2],ROW,FAMILY,QUALIFIERS[2],3);
  assertIncrementKey(kvs[3],ROW,FAMILY,QUALIFIERS[3],5);
  assertIncrementKey(kvs[4],ROW,FAMILY,QUALIFIERS[4],1);
  // Fresh row: increment qualifier i by i+1 and verify each counter.
  inc=new Increment(ROWS[0]);
  for (int i=0; i < QUALIFIERS.length; i++) {
    inc.addColumn(FAMILY,QUALIFIERS[i],i + 1);
  }
  ht.increment(inc);
  get=new Get(ROWS[0]);
  r=ht.get(get);
  kvs=r.rawCells();
  assertEquals(QUALIFIERS.length,kvs.length);
  for (int i=0; i < QUALIFIERS.length; i++) {
    assertIncrementKey(kvs[i],ROWS[0],FAMILY,QUALIFIERS[i],i + 1);
  }
  // The same increments again: every counter doubles to 2*(i+1).
  inc=new Increment(ROWS[0]);
  for (int i=0; i < QUALIFIERS.length; i++) {
    inc.addColumn(FAMILY,QUALIFIERS[i],i + 1);
  }
  ht.increment(inc);
  r=ht.get(get);
  kvs=r.rawCells();
  assertEquals(QUALIFIERS.length,kvs.length);
  for (int i=0; i <
QUALIFIERS.length; i++) {
    assertIncrementKey(kvs[i],ROWS[0],FAMILY,QUALIFIERS[i],2 * (i + 1));
  }
  // Increment by 0: values must remain at 2*(i+1).
  inc=new Increment(ROWS[0]);
  for (int i=0; i < QUALIFIERS.length; i++) {
    inc.addColumn(FAMILY,QUALIFIERS[i],0);
  }
  ht.increment(inc);
  r=ht.get(get);
  kvs=r.rawCells();
  assertEquals(QUALIFIERS.length,kvs.length);
  for (int i=0; i < QUALIFIERS.length; i++) {
    assertIncrementKey(kvs[i],ROWS[0],FAMILY,QUALIFIERS[i],2 * (i + 1));
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * A whole-row Delete must reset a counter: after flushing the first +5 and deleting the
 * row, a second +5 yields 5, not 10.
 */
@Test public void testIncrementWithDeletes() throws Exception {
  LOG.info("Starting " + this.name.getMethodName());
  final TableName TABLENAME =
      TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);
  final byte[] COLUMN = Bytes.toBytes("column");
  table.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
  // Persist the first increment to a store file before deleting.
  TEST_UTIL.flush(TABLENAME);
  Delete delete = new Delete(ROW);
  table.delete(delete);
  table.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
  Result result = table.get(new Get(ROW));
  assertEquals(1, result.size());
  assertEquals(5, Bytes.toLong(result.getValue(FAMILY, COLUMN)));
}

InternalCallVerifier EqualityVerifier 
/**
 * Increment with qualifiers added out of lexicographic order (B, A, C): the cells read
 * back arrive sorted (A, B, C), so index 0 maps to QUALIFIERS[1] and so on.
 */
@Test public void testIncrementOutOfOrder() throws Exception {
  LOG.info("Starting " + this.name.getMethodName());
  final TableName TABLENAME =
      TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);
  byte[][] QUALIFIERS =
      new byte[][] { Bytes.toBytes("B"), Bytes.toBytes("A"), Bytes.toBytes("C") };
  Increment increment = new Increment(ROW);
  for (byte[] qualifier : QUALIFIERS) {
    increment.addColumn(FAMILY, qualifier, 1);
  }
  table.increment(increment);
  Get get = new Get(ROW);
  Result result = table.get(get);
  Cell[] cells = result.rawCells();
  assertEquals(3, cells.length);
  // Sorted by qualifier: A=QUALIFIERS[1], B=QUALIFIERS[0], C=QUALIFIERS[2].
  assertIncrementKey(cells[0], ROW, FAMILY, QUALIFIERS[1], 1);
  assertIncrementKey(cells[1], ROW, FAMILY, QUALIFIERS[0], 1);
  assertIncrementKey(cells[2], ROW, FAMILY, QUALIFIERS[2], 1);
  // A second identical pass doubles every counter.
  increment = new Increment(ROW);
  for (byte[] qualifier : QUALIFIERS) {
    increment.addColumn(FAMILY, qualifier, 1);
  }
  table.increment(increment);
  result = table.get(get);
  cells = result.rawCells();
  assertEquals(3, cells.length);
  assertIncrementKey(cells[0], ROW, FAMILY, QUALIFIERS[1], 2);
  assertIncrementKey(cells[1], ROW, FAMILY, QUALIFIERS[0], 2);
  assertIncrementKey(cells[2], ROW, FAMILY, QUALIFIERS[2], 2);
}

Class: org.apache.hadoop.hbase.client.TestLeaseRenewal

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Scanner lease renewal: renewLease() keeps a slow scanner alive across pauses of half
 * the lease timeout, keeps succeeding while the scanner is open, fails once the scan is
 * exhausted, and the server must never raise OutOfOrderScannerNextException.
 */
@Test public void testLeaseRenewal() throws Exception {
  HTable table = TEST_UTIL.createTable(TableName.valueOf("testLeaseRenewal"), FAMILY);
  Put put = new Put(ROW_BYTES);
  put.addColumn(FAMILY, COL_QUAL, VAL_BYTES);
  table.put(put);
  put = new Put(ANOTHERROW);
  put.addColumn(FAMILY, COL_QUAL, VAL_BYTES);
  table.put(put);
  Scan scan = new Scan();
  scan.setCaching(1);  // one row per server round-trip
  ResultScanner scanner = table.getScanner(scan);
  assertTrue(scanner.renewLease());
  assertTrue(Arrays.equals(scanner.next().getRow(), ANOTHERROW));
  // Renew three times, each after waiting half the lease timeout, so the lease
  // never expires even though no next() is issued in between.
  for (int i = 0; i < 3; i++) {
    Thread.sleep(leaseTimeout / 2);
    assertTrue(scanner.renewLease());
  }
  assertTrue(Arrays.equals(scanner.next().getRow(), ROW_BYTES));
  assertTrue(scanner.renewLease());
  assertNull(scanner.next());
  // Once the scan is exhausted the lease is gone; renewal must fail.
  assertFalse(scanner.renewLease());
  scanner.close();
  table.close();
  MetricsHBaseServerSource serverSource = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0)
      .getRpcServer().getMetrics().getMetricsSource();
  HELPER.assertCounter("exceptions.OutOfOrderScannerNextException", 0, serverSource);
}

Class: org.apache.hadoop.hbase.client.TestMetaWithReplicas

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Checks the znodes for the primary meta region and its two replicas: each must exist
 * under the base znode, match the watcher's replica-znode naming, and hold a parseable
 * ServerName.
 */
@Test public void testZookeeperNodesForReplicas() throws Exception {
  ZooKeeperWatcher watcher = TEST_UTIL.getZooKeeperWatcher();
  Configuration conf = TEST_UTIL.getConfiguration();
  String baseZNode =
      conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  String primaryMetaZnode = ZKUtil.joinZNode(baseZNode,
      conf.get("zookeeper.znode.metaserver", "meta-region-server"));
  // The primary znode must hold a valid ServerName (parseFrom throws otherwise).
  byte[] nodeData = ZKUtil.getData(watcher, primaryMetaZnode);
  ServerName.parseFrom(nodeData);
  // Replicas 1 and 2 live in sibling znodes suffixed "-<replicaId>".
  for (int replicaId = 1; replicaId < 3; replicaId++) {
    String expectedZnode = ZKUtil.joinZNode(baseZNode,
        conf.get("zookeeper.znode.metaserver", "meta-region-server") + "-" + replicaId);
    String actualZnode = watcher.getZNodeForReplica(replicaId);
    assertTrue(actualZnode.equals(expectedZnode));
    nodeData = ZKUtil.getData(watcher, expectedZnode);
    ServerName.parseFrom(nodeData);
  }
}

InternalCallVerifier BooleanVerifier 
/**
 * Moves the meta region to a different live region server and verifies that the
 * meta-location znode in ZooKeeper is updated to report the new server.
 */
@Test public void testMetaAddressChange() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  String baseZNode =
      conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  String primaryMetaZnode = ZKUtil.joinZNode(baseZNode,
      conf.get("zookeeper.znode.metaserver", "meta-region-server"));
  byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
  ServerName currentServer = ServerName.parseFrom(data);
  Collection<ServerName> liveServers =
      TEST_UTIL.getHBaseAdmin().getClusterStatus().getServers();
  // Pick any live server other than the one currently hosting meta.
  ServerName moveToServer = null;
  for (ServerName s : liveServers) {
    if (!currentServer.equals(s)) {
      moveToServer = s;
    }
  }
  // JUnit assertion instead of the Java `assert` statement: `assert` is a no-op
  // unless the JVM runs with -ea, so the old check could silently disappear.
  assertTrue("no live server other than the meta host", moveToServer != null);
  TableName tableName = TableName.valueOf("randomTable5678");
  TEST_UTIL.createTable(tableName, "f");
  assertTrue(TEST_UTIL.getHBaseAdmin().tableExists(tableName));
  TEST_UTIL.getHBaseAdmin().move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
      Bytes.toBytes(moveToServer.getServerName()));
  // Poll the znode (10ms x up to 1000 tries) until it reports the destination server.
  int i = 0;
  do {
    Thread.sleep(10);
    data = ZKUtil.getData(zkw, primaryMetaZnode);
    currentServer = ServerName.parseFrom(data);
    i++;
  } while (!moveToServer.equals(currentServer) && i < 1000);
  assertTrue("meta znode was not updated within the timeout", i != 1000);
  TEST_UTIL.getHBaseAdmin().disableTable(tableName);
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableDisabled(tableName));
}

Class: org.apache.hadoop.hbase.client.TestMultiParallel

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Batch-puts one value per key, batch-deletes the whole family for every key, and
 * verifies that no row remains readable.
 */
@Test(timeout = 300000) public void testBatchWithDelete() throws Exception {
  LOG.info("test=testBatchWithDelete");
  Table table = UTIL.getConnection().getTable(TEST_TABLE);
  // Load one put per key via batch.
  List<Put> puts = constructPutRequests();
  Object[] results = new Object[puts.size()];
  table.batch(puts, results);
  validateSizeAndEmpty(results, KEYS.length);
  // Delete the whole family for every key in a single batch.
  List<Delete> deletes = new ArrayList<Delete>();
  for (int i = 0; i < KEYS.length; i++) {
    Delete delete = new Delete(KEYS[i]);
    delete.addFamily(BYTES_FAMILY);
    deletes.add(delete);
  }
  results = new Object[deletes.size()];
  table.batch(deletes, results);
  validateSizeAndEmpty(results, KEYS.length);
  // Every row must now be gone.
  for (byte[] k : KEYS) {
    Get get = new Get(k);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    Assert.assertFalse(table.exists(get));
  }
  table.close();
}

InternalCallVerifier EqualityVerifier 
/**
 * This is for testing the active number of threads that were used while doing a batch
 * operation. It inserts one row per region via the batch operation, and then checks that
 * the pool's largest size equals the number of distinct region servers the puts were
 * routed to. For HBASE-3553.
 * @throws IOException
 * @throws InterruptedException
 * @throws NoSuchFieldException
 * @throws SecurityException
 */
@Test(timeout = 300000) public void testActiveThreadsCount() throws Exception {
  UTIL.getConfiguration().setLong("hbase.htable.threads.coresize", slaves + 1);
  try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration())) {
    ThreadPoolExecutor executor = HTable.getDefaultExecutor(UTIL.getConfiguration());
    try {
      try (Table t = connection.getTable(TEST_TABLE, executor)) {
        // List<Put>: a typed list is required for the enhanced-for below
        // (iterating a raw List as Row does not compile).
        List<Put> puts = constructPutRequests();  // one put per region
        t.batch(puts, null);
        // Collect the distinct servers the puts were routed to.
        HashSet<ServerName> regionservers = new HashSet<ServerName>();
        try (RegionLocator locator = connection.getRegionLocator(TEST_TABLE)) {
          for (Row r : puts) {
            HRegionLocation location = locator.getRegionLocation(r.getRow());
            regionservers.add(location.getServerName());
          }
        }
        // The pool should have grown to one thread per target server.
        assertEquals(regionservers.size(), executor.getLargestPoolSize());
      }
    } finally {
      executor.shutdownNow();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Table.delete(List) must remove all rows; on full success it also clears the list of
 * deletes (succeeded operations are removed from the caller's list).
 */
@Test(timeout = 300000) public void testHTableDeleteWithList() throws Exception {
  LOG.info("test=testHTableDeleteWithList");
  Table table = UTIL.getConnection().getTable(TEST_TABLE);
  List<Put> puts = constructPutRequests();
  Object[] results = new Object[puts.size()];
  table.batch(puts, results);
  validateSizeAndEmpty(results, KEYS.length);
  // One family-wide delete per key.
  ArrayList<Delete> deletes = new ArrayList<Delete>();
  for (int i = 0; i < KEYS.length; i++) {
    Delete delete = new Delete(KEYS[i]);
    delete.addFamily(BYTES_FAMILY);
    deletes.add(delete);
  }
  table.delete(deletes);
  // delete(List) drains succeeded deletes from the list; all must have succeeded.
  Assert.assertTrue(deletes.isEmpty());
  for (byte[] k : KEYS) {
    Get get = new Get(k);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    Assert.assertFalse(table.exists(get));
  }
  table.close();
}

Class: org.apache.hadoop.hbase.client.TestMultiRespectsLimits

InternalCallVerifier EqualityVerifier 
/**
 * A large multi-get against a split table must be chunked by the server: the
 * "multiResponseTooLarge" exception counters have to grow while the client still
 * receives every result.
 */
@Test public void testMultiLimits() throws Exception {
  final TableName name = TableName.valueOf("testMultiLimits");
  Table t = TEST_UTIL.createTable(name, FAMILY);
  TEST_UTIL.loadTable(t, FAMILY, false);
  // Split the table and wait until at least two regions exist.
  try (final Admin admin = TEST_UTIL.getAdmin()) {
    admin.split(name);
    TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return admin.getTableRegions(name).size() > 1;
      }
    });
  }
  List<Get> gets = new ArrayList<>(MAX_SIZE);
  for (int i = 0; i < MAX_SIZE; i++) {
    gets.add(new Get(HBaseTestingUtility.ROWS[i]));
  }
  RpcServerInterface rpcServer = TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer();
  BaseSource s = rpcServer.getMetrics().getMetricsSource();
  // Snapshot the counters so the assertions measure only this test's deltas.
  long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
  long startingMultiExceptions =
      METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);
  Result[] results = t.get(gets);
  assertEquals(MAX_SIZE, results.length);
  // The server had to raise multiResponseTooLarge more than 25 times.
  METRICS_ASSERT.assertCounterGt("exceptions",
      startingExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s);
  METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge",
      startingMultiExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Multi-get size limits with encoded data blocks: after writing six near-MAX_SIZE cells
 * and flushing, two gets on the same row must still trip the multiResponseTooLarge
 * counters while both results come back.
 */
@Test public void testBlockMultiLimits() throws Exception {
  final TableName name = TableName.valueOf("testBlockMultiLimits");
  HTableDescriptor desc = new HTableDescriptor(name);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
  hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
  desc.addFamily(hcd);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
  Table t = TEST_UTIL.getConnection().getTable(name);
  final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  RpcServerInterface rpcServer = regionServer.getRpcServer();
  BaseSource s = rpcServer.getMetrics().getMetricsSource();
  long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
  long startingMultiExceptions =
      METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);
  byte[] row = Bytes.toBytes("TEST");
  byte[][] cols = new byte[][] { Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2"),
      Bytes.toBytes("3"), Bytes.toBytes("4"), Bytes.toBytes("5") };
  // Each cell value is just under MAX_SIZE.
  byte[] value = new byte[MAX_SIZE - 100];
  ThreadLocalRandom.current().nextBytes(value);
  for (byte[] col : cols) {
    Put p = new Put(row);
    p.addImmutable(FAMILY, col, value);
    t.put(p);
  }
  // Flush and wait so the gets below are served from flushed store files.
  try (final Admin admin = TEST_UTIL.getAdmin()) {
    admin.flush(name);
    TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
      @Override public boolean evaluate() throws Exception {
        return regionServer.getOnlineRegions(name).get(0).getMaxFlushedSeqId() > 3;
      }
    });
  }
  List<Get> gets = new ArrayList<>(2);
  Get g0 = new Get(row);
  g0.addColumn(FAMILY, cols[0]);
  gets.add(g0);
  Get g2 = new Get(row);
  g2.addColumn(FAMILY, cols[3]);
  gets.add(g2);
  Result[] results = t.get(gets);
  assertEquals(2, results.length);
  METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions, s);
  METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge",
      startingMultiExceptions, s);
}

Class: org.apache.hadoop.hbase.client.TestMultipleTimestamps

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Reseek with multiple columns and multiple timestamps after a flush: rows 5
 * and 7 must each return columns 3 and 5 at ts=3 then ts=2 (newest first);
 * the requested-but-absent column 4 and other timestamps never appear.
 */
@Test public void testReseeksWithMultipleColumnMultipleTimestamp() throws IOException {
  LOG.info("testReseeksWithMultipleColumnMultipleTimestamp");
  TableName TABLE=TableName.valueOf("testReseeksWithMultipleColumnMiltipleTimestamps");
  byte[] FAMILY=Bytes.toBytes("event_log");
  byte[][] FAMILIES=new byte[][]{FAMILY};
  Table ht=TEST_UTIL.createTable(TABLE,FAMILIES,Integer.MAX_VALUE);
  // Seed rows {1,3,5,7} x columns {1,3,5} with versions at ts 1..5.
  Integer[] putRows=new Integer[]{1,3,5,7};
  Integer[] putColumns=new Integer[]{1,3,5};
  Long[] putTimestamps=new Long[]{1L,2L,3L,4L,5L};
  // Scan under test: rows {5,7}, columns {3,4,5}, ts {2,3}, up to 2 versions.
  Integer[] scanRows=new Integer[]{5,7};
  Integer[] scanColumns=new Integer[]{3,4,5};
  Long[] scanTimestamps=new Long[]{2l,3L};
  int scanMaxVersions=2;
  put(ht,FAMILY,putRows,putColumns,putTimestamps);
  TEST_UTIL.flush(TABLE);
  // Debug pass: dump everything (all versions) to the log.
  Scan scan=new Scan();
  scan.setMaxVersions(10);
  ResultScanner scanner=ht.getScanner(scan);
  while (true) {
    Result r=scanner.next();
    if (r == null) break;
    LOG.info("r=" + r);
  }
  scanner=scan(ht,FAMILY,scanRows,scanColumns,scanTimestamps,scanMaxVersions);
  Cell[] kvs;
  // Row 5: columns 3 and 5, ts 3 then 2 each.
  kvs=scanner.next().rawCells();
  assertEquals(4,kvs.length);
  checkOneCell(kvs[0],FAMILY,5,3,3);
  checkOneCell(kvs[1],FAMILY,5,3,2);
  checkOneCell(kvs[2],FAMILY,5,5,3);
  checkOneCell(kvs[3],FAMILY,5,5,2);
  // Row 7: same pattern.
  kvs=scanner.next().rawCells();
  assertEquals(4,kvs.length);
  checkOneCell(kvs[0],FAMILY,7,3,3);
  checkOneCell(kvs[1],FAMILY,7,3,2);
  checkOneCell(kvs[2],FAMILY,7,5,3);
  checkOneCell(kvs[3],FAMILY,7,5,2);
  ht.close();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Reseek over flushed data with a single column and two selected timestamps: rows 3 and
 * 5 must each return column 3 at ts=4 then ts=3 (newest first).
 */
@Test public void testReseeksWithOneColumnMiltipleTimestamp() throws IOException {
  TableName TABLE = TableName.valueOf("testReseeksWithOne" + "ColumnMiltipleTimestamps");
  byte[] FAMILY = Bytes.toBytes("event_log");
  byte[][] FAMILIES = new byte[][] { FAMILY };
  Table table = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
  // Seed rows {1,3,5,7} x columns {1,3,5} with versions at ts 1..5, then flush.
  Integer[] putRows = new Integer[] { 1, 3, 5, 7 };
  Integer[] putColumns = new Integer[] { 1, 3, 5 };
  Long[] putTimestamps = new Long[] { 1L, 2L, 3L, 4L, 5L };
  put(table, FAMILY, putRows, putColumns, putTimestamps);
  TEST_UTIL.flush(TABLE);
  // Scan under test: rows {3,5}, column {3}, timestamps {3,4}, up to 2 versions.
  Integer[] scanRows = new Integer[] { 3, 5 };
  Integer[] scanColumns = new Integer[] { 3 };
  Long[] scanTimestamps = new Long[] { 3L, 4L };
  int scanMaxVersions = 2;
  ResultScanner scanner =
      scan(table, FAMILY, scanRows, scanColumns, scanTimestamps, scanMaxVersions);
  Cell[] cells = scanner.next().rawCells();
  assertEquals(2, cells.length);
  checkOneCell(cells[0], FAMILY, 3, 3, 4);
  checkOneCell(cells[1], FAMILY, 3, 3, 3);
  cells = scanner.next().rawCells();
  assertEquals(2, cells.length);
  checkOneCell(cells[0], FAMILY, 5, 3, 4);
  checkOneCell(cells[1], FAMILY, 5, 3, 3);
  table.close();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * After a family delete, a versioned get for earlier timestamps must return nothing,
 * even though those versions were flushed to a store file before the delete.
 */
@Test public void testWithFamilyDeletes() throws IOException {
  TableName TABLE = TableName.valueOf("testWithFamilyDeletes");
  byte[] FAMILY = Bytes.toBytes("event_log");
  byte[][] FAMILIES = new byte[][] { FAMILY };
  Table table = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
  // Versions 1..5 at row 0 / column 0, persisted before the delete.
  putNVersions(table, FAMILY, 0, 0, 1, 5);
  TEST_UTIL.flush(TABLE);
  deleteFamily(table, FAMILY, 0);
  Cell[] cells = getNVersions(table, FAMILY, 0, 0, Arrays.asList(2L, 3L));
  assertEquals(0, cells.length);
  table.close();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * deleteAllVersionsBefore(ts=4) must hide versions 2 and 3, even though they were
 * flushed to a store file before the delete.
 */
@Test public void testWithMultipleVersionDeletes() throws IOException {
  LOG.info("testWithMultipleVersionDeletes");
  TableName TABLE = TableName.valueOf("testWithMultipleVersionDeletes");
  byte[] FAMILY = Bytes.toBytes("event_log");
  byte[][] FAMILIES = new byte[][] { FAMILY };
  Table table = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
  // Versions 1..5 at row 0 / column 0, persisted before the delete.
  putNVersions(table, FAMILY, 0, 0, 1, 5);
  TEST_UTIL.flush(TABLE);
  deleteAllVersionsBefore(table, FAMILY, 0, 0, 4);
  Cell[] cells = getNVersions(table, FAMILY, 0, 0, Arrays.asList(2L, 3L));
  assertEquals(0, cells.length);
  table.close();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Reseek requesting two columns but a single timestamp; of the requested columns {3,4}
 * only 3 was written, so each of rows 3 and 5 yields exactly one cell at ts=3.
 */
@Test public void testReseeksWithMultipleColumnOneTimestamp() throws IOException {
  LOG.info("testReseeksWithMultipleColumnOneTimestamp");
  TableName TABLE = TableName.valueOf("testReseeksWithMultiple" + "ColumnOneTimestamps");
  byte[] FAMILY = Bytes.toBytes("event_log");
  byte[][] FAMILIES = new byte[][] { FAMILY };
  Table table = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
  // Seed rows {1,3,5,7} x columns {1,3,5} with versions at ts 1..5, then flush.
  Integer[] putRows = new Integer[] { 1, 3, 5, 7 };
  Integer[] putColumns = new Integer[] { 1, 3, 5 };
  Long[] putTimestamps = new Long[] { 1L, 2L, 3L, 4L, 5L };
  put(table, FAMILY, putRows, putColumns, putTimestamps);
  TEST_UTIL.flush(TABLE);
  // Scan under test: rows {3,5}, columns {3,4}, single timestamp {3}.
  Integer[] scanRows = new Integer[] { 3, 5 };
  Integer[] scanColumns = new Integer[] { 3, 4 };
  Long[] scanTimestamps = new Long[] { 3L };
  int scanMaxVersions = 2;
  ResultScanner scanner =
      scan(table, FAMILY, scanRows, scanColumns, scanTimestamps, scanMaxVersions);
  Cell[] cells = scanner.next().rawCells();
  assertEquals(1, cells.length);
  checkOneCell(cells[0], FAMILY, 3, 3, 3);
  cells = scanner.next().rawCells();
  assertEquals(1, cells.length);
  checkOneCell(cells[0], FAMILY, 5, 3, 3);
  table.close();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Reseek across two flushed store files plus the memstore: three put batches
 * with flushes after the first two, then a selective scan whose matches come
 * from all three sources.
 */
@Test public void testReseeksWithMultipleFiles() throws IOException {
  LOG.info("testReseeksWithMultipleFiles");
  TableName TABLE=TableName.valueOf("testReseeksWithMultipleFiles");
  byte[] FAMILY=Bytes.toBytes("event_log");
  byte[][] FAMILIES=new byte[][]{FAMILY};
  Table ht=TEST_UTIL.createTable(TABLE,FAMILIES,Integer.MAX_VALUE);
  // Batch 1 -> flushed into the first store file.
  Integer[] putRows1=new Integer[]{1,2,3};
  Integer[] putColumns1=new Integer[]{2,5,6};
  Long[] putTimestamps1=new Long[]{1L,2L,5L};
  // Batch 2 -> flushed into the second store file.
  Integer[] putRows2=new Integer[]{6,7};
  Integer[] putColumns2=new Integer[]{3,6};
  Long[] putTimestamps2=new Long[]{4L,5L};
  // Batch 3 -> stays in the memstore (no flush afterwards).
  Integer[] putRows3=new Integer[]{2,3,5};
  Integer[] putColumns3=new Integer[]{1,2,3};
  Long[] putTimestamps3=new Long[]{4L,8L};
  Integer[] scanRows=new Integer[]{3,5,7};
  Integer[] scanColumns=new Integer[]{3,4,5};
  Long[] scanTimestamps=new Long[]{2l,4L};
  int scanMaxVersions=5;
  put(ht,FAMILY,putRows1,putColumns1,putTimestamps1);
  TEST_UTIL.flush(TABLE);
  put(ht,FAMILY,putRows2,putColumns2,putTimestamps2);
  TEST_UTIL.flush(TABLE);
  put(ht,FAMILY,putRows3,putColumns3,putTimestamps3);
  ResultScanner scanner=scan(ht,FAMILY,scanRows,scanColumns,scanTimestamps,scanMaxVersions);
  Cell[] kvs;
  // Row 3: column 3 @ ts=4 and column 5 @ ts=2.
  kvs=scanner.next().rawCells();
  assertEquals(2,kvs.length);
  checkOneCell(kvs[0],FAMILY,3,3,4);
  checkOneCell(kvs[1],FAMILY,3,5,2);
  // Row 5: column 3 @ ts=4.
  kvs=scanner.next().rawCells();
  assertEquals(1,kvs.length);
  checkOneCell(kvs[0],FAMILY,5,3,4);
  // Row 6: column 3 @ ts=4.
  kvs=scanner.next().rawCells();
  assertEquals(1,kvs.length);
  checkOneCell(kvs[0],FAMILY,6,3,4);
  // Row 7: column 3 @ ts=4.
  kvs=scanner.next().rawCells();
  assertEquals(1,kvs.length);
  checkOneCell(kvs[0],FAMILY,7,3,4);
  ht.close();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * After a column delete, a versioned get for earlier timestamps must return nothing,
 * even though those versions were flushed to a store file before the delete.
 */
@Test public void testWithColumnDeletes() throws IOException {
  TableName TABLE = TableName.valueOf("testWithColumnDeletes");
  byte[] FAMILY = Bytes.toBytes("event_log");
  byte[][] FAMILIES = new byte[][] { FAMILY };
  Table table = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
  // Versions 1..5 at row 0 / column 0, persisted before the delete.
  putNVersions(table, FAMILY, 0, 0, 1, 5);
  TEST_UTIL.flush(TABLE);
  deleteColumn(table, FAMILY, 0, 0);
  Cell[] cells = getNVersions(table, FAMILY, 0, 0, Arrays.asList(2L, 3L));
  assertEquals(0, cells.length);
  table.close();
}

Class: org.apache.hadoop.hbase.client.TestOperation

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises the ByteBuffer overloads of Put.addColumn: the per-cell timestamp is
 * honored, a null value becomes an empty byte[], the row may be given as a
 * ByteBuffer, and a Put-level timestamp does not leak into per-cell timestamps.
 */
@Test public void testPutCreationWithByteBuffer(){
  Put p=new Put(ROW);
  // Parameterized (was raw List): with a raw type c.get(0) is Object, so the
  // getTimestamp()/CellUtil calls below would not compile.
  List<Cell> c=p.get(FAMILY,QUALIFIER);
  Assert.assertEquals(0,c.size());
  Assert.assertEquals(HConstants.LATEST_TIMESTAMP,p.getTimeStamp());
  // Explicit timestamp with a real value.
  p.addColumn(FAMILY,ByteBuffer.wrap(QUALIFIER),1984L,ByteBuffer.wrap(VALUE));
  c=p.get(FAMILY,QUALIFIER);
  Assert.assertEquals(1,c.size());
  Assert.assertEquals(1984L,c.get(0).getTimestamp());
  Assert.assertArrayEquals(VALUE,CellUtil.cloneValue(c.get(0)));
  Assert.assertEquals(HConstants.LATEST_TIMESTAMP,p.getTimeStamp());
  Assert.assertEquals(0,CellComparator.COMPARATOR.compare(c.get(0),new KeyValue(c.get(0))));
  // Null value must be stored as an empty byte[].
  p=new Put(ROW);
  p.addColumn(FAMILY,ByteBuffer.wrap(QUALIFIER),2013L,null);
  c=p.get(FAMILY,QUALIFIER);
  Assert.assertEquals(1,c.size());
  Assert.assertEquals(2013L,c.get(0).getTimestamp());
  Assert.assertArrayEquals(new byte[]{},CellUtil.cloneValue(c.get(0)));
  Assert.assertEquals(HConstants.LATEST_TIMESTAMP,p.getTimeStamp());
  Assert.assertEquals(0,CellComparator.COMPARATOR.compare(c.get(0),new KeyValue(c.get(0))));
  // Row supplied as a ByteBuffer must round-trip to the same bytes.
  p=new Put(ByteBuffer.wrap(ROW));
  p.addColumn(FAMILY,ByteBuffer.wrap(QUALIFIER),2001L,null);
  c=p.get(FAMILY,QUALIFIER);
  Assert.assertEquals(1,c.size());
  Assert.assertEquals(2001L,c.get(0).getTimestamp());
  Assert.assertArrayEquals(new byte[]{},CellUtil.cloneValue(c.get(0)));
  Assert.assertArrayEquals(ROW,CellUtil.cloneRow(c.get(0)));
  Assert.assertEquals(HConstants.LATEST_TIMESTAMP,p.getTimeStamp());
  Assert.assertEquals(0,CellComparator.COMPARATOR.compare(c.get(0),new KeyValue(c.get(0))));
  // Put-level timestamp (1970L) must not override the cell-level one (2001L).
  p=new Put(ByteBuffer.wrap(ROW),1970L);
  p.addColumn(FAMILY,ByteBuffer.wrap(QUALIFIER),2001L,null);
  c=p.get(FAMILY,QUALIFIER);
  Assert.assertEquals(1,c.size());
  Assert.assertEquals(2001L,c.get(0).getTimestamp());
  Assert.assertArrayEquals(new byte[]{},CellUtil.cloneValue(c.get(0)));
  Assert.assertArrayEquals(ROW,CellUtil.cloneRow(c.get(0)));
  Assert.assertEquals(1970L,p.getTimeStamp());
  Assert.assertEquals(0,CellComparator.COMPARATOR.compare(c.get(0),new KeyValue(c.get(0))));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Round-trips Scan/Get/Put through toJSON() and checks with a Jackson mapper
// that the produced JSON parses and preserves row, family, qualifier and value
// length. NOTE(review): the raw Map/List types look like generics stripped by
// extraction; the casts still compile, so the code is left untouched.
/** * Test the client Operations' JSON encoding to ensure that produced JSON is * parseable and that the details are present and not corrupted. * @throws IOException */ @Test public void testOperationJSON() throws IOException { Scan scan=new Scan(ROW); scan.addColumn(FAMILY,QUALIFIER); String json=scan.toJSON(); Map parsedJSON=mapper.readValue(json,HashMap.class); assertEquals("startRow incorrect in Scan.toJSON()",Bytes.toStringBinary(ROW),parsedJSON.get("startRow")); List familyInfo=(List)((Map)parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Scan.toJSON()",familyInfo); assertEquals("Qualifier absent in Scan.toJSON()",1,familyInfo.size()); assertEquals("Qualifier incorrect in Scan.toJSON()",Bytes.toStringBinary(QUALIFIER),familyInfo.get(0)); Get get=new Get(ROW); get.addColumn(FAMILY,QUALIFIER); json=get.toJSON(); parsedJSON=mapper.readValue(json,HashMap.class); assertEquals("row incorrect in Get.toJSON()",Bytes.toStringBinary(ROW),parsedJSON.get("row")); familyInfo=(List)((Map)parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Get.toJSON()",familyInfo); assertEquals("Qualifier absent in Get.toJSON()",1,familyInfo.size()); assertEquals("Qualifier incorrect in Get.toJSON()",Bytes.toStringBinary(QUALIFIER),familyInfo.get(0)); Put put=new Put(ROW); put.addColumn(FAMILY,QUALIFIER,VALUE); json=put.toJSON(); parsedJSON=mapper.readValue(json,HashMap.class); assertEquals("row absent in Put.toJSON()",Bytes.toStringBinary(ROW),parsedJSON.get("row")); familyInfo=(List)((Map)parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Put.toJSON()",familyInfo); assertEquals("KeyValue absent in Put.toJSON()",1,familyInfo.size()); Map kvMap=(Map)familyInfo.get(0); assertEquals("Qualifier incorrect in Put.toJSON()",Bytes.toStringBinary(QUALIFIER),kvMap.get("qualifier")); assertEquals("Value length incorrect in Put.toJSON()",VALUE.length,kvMap.get("vlen"));
// Same verification for Delete: row, family and qualifier must survive toJSON().
Delete delete=new Delete(ROW); delete.addColumn(FAMILY,QUALIFIER); json=delete.toJSON(); parsedJSON=mapper.readValue(json,HashMap.class); assertEquals("row absent in Delete.toJSON()",Bytes.toStringBinary(ROW),parsedJSON.get("row")); familyInfo=(List)((Map)parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Delete.toJSON()",familyInfo); assertEquals("KeyValue absent in Delete.toJSON()",1,familyInfo.size()); kvMap=(Map)familyInfo.get(0); assertEquals("Qualifier incorrect in Delete.toJSON()",Bytes.toStringBinary(QUALIFIER),kvMap.get("qualifier")); }

Class: org.apache.hadoop.hbase.client.TestProcedureFuture

InternalCallVerifier BooleanVerifier 
/**
 * When the master's response carries no procId, the future must skip both
 * getProcedureResult() and convertResult() and instead fall back to
 * waitOperationResult() followed by postOperationResult().
 */
@Test(timeout=60000) public void testWithoutProcId() throws Exception {
  HBaseAdmin mockAdmin=Mockito.mock(HBaseAdmin.class);
  // A null procId simulates a master that returned no procedure id.
  TestFuture future=new TestFuture(mockAdmin,null);
  future.get(1,TimeUnit.MINUTES);
  assertFalse("unexpected getProcedureResult() called",future.wasGetProcedureResultCalled());
  assertFalse("unexpected convertResult() called",future.wasConvertResultCalled());
  assertTrue("expected waitOperationResult() to be called",future.wasWaitOperationResultCalled());
  assertTrue("expected postOperationResult() to be called",future.wasPostOperationResultCalled());
}

InternalCallVerifier BooleanVerifier 
// The anonymous TestFuture simulates failover onto an old master: its
// getProcedureResult() records the call (via super) and then throws
// DoNotRetryIOException wrapping UnsupportedOperationException, so the future
// must trap it and complete via waitOperationResult()/postOperationResult().
/** * When a new client with procedure support tries to ask an old-master without proc-support * the procedure result we get a DoNotRetryIOException (which is an UnsupportedOperationException) * The future should trap that and fallback to the waitOperationResult(). * This happens when the operation calls happens on a "new master" but while we are waiting * the operation to be completed, we failover on an "old master". */ @Test(timeout=60000) public void testOnServerWithNoProcedureSupport() throws Exception { HBaseAdmin admin=Mockito.mock(HBaseAdmin.class); TestFuture f=new TestFuture(admin,100L){ @Override protected GetProcedureResultResponse getProcedureResult( final GetProcedureResultRequest request) throws IOException { super.getProcedureResult(request); throw new DoNotRetryIOException(new UnsupportedOperationException("getProcedureResult")); } } ; f.get(1,TimeUnit.MINUTES); assertTrue("expected getProcedureResult() to be called",f.wasGetProcedureResultCalled()); assertFalse("unexpected convertResult() called",f.wasConvertResultCalled()); assertTrue("expected waitOperationResult() to be called",f.wasWaitOperationResultCalled()); assertTrue("expected postOperationResult() to be called",f.wasPostOperationResultCalled()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// The mocked getProcedureResult() reports RUNNING for the first nine calls and
// FINISHED on the tenth (spinCount >= 10), so the future must poll exactly 10
// times before converting the result; waitOperationResult() must never run.
/** * Verify that the spin loop for the procedure running works. */ @Test(timeout=60000) public void testWithProcIdAndSpinning() throws Exception { final AtomicInteger spinCount=new AtomicInteger(0); HBaseAdmin admin=Mockito.mock(HBaseAdmin.class); TestFuture f=new TestFuture(admin,100L){ @Override protected GetProcedureResultResponse getProcedureResult( final GetProcedureResultRequest request) throws IOException { boolean done=spinCount.incrementAndGet() >= 10; return GetProcedureResultResponse.newBuilder().setState(done ? GetProcedureResultResponse.State.FINISHED : GetProcedureResultResponse.State.RUNNING).build(); } } ; f.get(1,TimeUnit.MINUTES); assertEquals(10,spinCount.get()); assertTrue("expected convertResult() to be called",f.wasConvertResultCalled()); assertFalse("unexpected waitOperationResult() called",f.wasWaitOperationResultCalled()); assertTrue("expected postOperationResult() to be called",f.wasPostOperationResultCalled()); }

InternalCallVerifier BooleanVerifier 
/**
 * When the master returns a procId, the future must fetch the procedure result
 * (getProcedureResult() + convertResult()) and must not fall back to
 * waitOperationResult().
 */
@Test(timeout=60000) public void testWithProcId() throws Exception {
  HBaseAdmin mockAdmin=Mockito.mock(HBaseAdmin.class);
  // procId 100L simulates a master that did return a procedure id.
  TestFuture future=new TestFuture(mockAdmin,100L);
  future.get(1,TimeUnit.MINUTES);
  assertTrue("expected getProcedureResult() to be called",future.wasGetProcedureResultCalled());
  assertTrue("expected convertResult() to be called",future.wasConvertResultCalled());
  assertFalse("unexpected waitOperationResult() called",future.wasWaitOperationResultCalled());
  assertTrue("expected postOperationResult() to be called",future.wasPostOperationResultCalled());
}

Class: org.apache.hadoop.hbase.client.TestPut

InternalCallVerifier EqualityVerifier 
/** The Put copy constructor must copy the cell lists rather than alias them. */
@Test public void testCopyConstructor(){
  Put source=new Put(Bytes.toBytes("ROW-01"));
  byte[] cf=Bytes.toBytes("CF-01");
  byte[] qual=Bytes.toBytes("Q-01");
  source.addColumn(cf,qual,Bytes.toBytes("V-01"));
  Put copy=new Put(source);
  // Immediately after copying, both Puts hold equal cell lists.
  assertEquals(source.getCellList(cf),copy.getCellList(cf));
  // Mutating the source afterwards must not leak into the copy.
  source.addColumn(cf,qual,Bytes.toBytes("V-02"));
  assertNotEquals(source.getCellList(cf),copy.getCellList(cf));
}

Class: org.apache.hadoop.hbase.client.TestPutDeleteEtcCellIteration

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Columns added to an Increment come back through its CellScanner in insertion
// order; for each index i the family bytes and the long-encoded value must both
// round-trip, and exactly COUNT cells must be produced.
@Test public void testIncrementIteration() throws IOException { Increment increment=new Increment(ROW); for (int i=0; i < COUNT; i++) { byte[] bytes=Bytes.toBytes(i); increment.addColumn(bytes,bytes,i); } int index=0; for (CellScanner cellScanner=increment.cellScanner(); cellScanner.advance(); ) { Cell cell=cellScanner.current(); int value=index; byte[] bytes=Bytes.toBytes(index++); KeyValue kv=(KeyValue)cell; assertTrue(Bytes.equals(CellUtil.cloneFamily(kv),bytes)); long a=Bytes.toLong(CellUtil.cloneValue(kv)); assertEquals(value,a); } assertEquals(COUNT,index); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Cells added to an Append come back, in insertion order, through its CellScanner. */
@Test public void testAppendIteration() throws IOException {
  Append append=new Append(ROW);
  for (int i=0; i < COUNT; i++) {
    byte[] encoded=Bytes.toBytes(i);
    append.add(encoded,encoded,encoded);
  }
  int seen=0;
  for (CellScanner cs=append.cellScanner(); cs.advance(); ) {
    // The i-th cell must carry Bytes.toBytes(i) as both family and value.
    byte[] expected=Bytes.toBytes(seen++);
    KeyValue kv=(KeyValue)cs.current();
    assertTrue(Bytes.equals(CellUtil.cloneFamily(kv),expected));
    assertTrue(Bytes.equals(CellUtil.cloneValue(kv),expected));
  }
  assertEquals(COUNT,seen);
}

Class: org.apache.hadoop.hbase.client.TestPutDotHas

InternalCallVerifier BooleanVerifier 
// has(family, qualifier) must match only the exact (family, qualifier) pair that
// was put; swapping the two byte[] arguments must not match.
@Test public void testHasIgnoreValueIgnoreTS(){ Assert.assertTrue(put.has(FAMILY_01,QUALIFIER_01)); Assert.assertFalse(put.has(QUALIFIER_01,FAMILY_01)); }

InternalCallVerifier BooleanVerifier 
// Full-precision has(): family, qualifier, timestamp and value must all match.
// Each assertFalse perturbs exactly one of the four arguments.
@Test public void testHas(){ Assert.assertTrue(put.has(FAMILY_01,QUALIFIER_01,TS,VALUE_01)); Assert.assertFalse(put.has(FAMILY_01,QUALIFIER_01,TS + 1,VALUE_01)); Assert.assertFalse(put.has(FAMILY_01,QUALIFIER_01,TS,QUALIFIER_01)); Assert.assertFalse(put.has(QUALIFIER_01,QUALIFIER_01,TS,VALUE_01)); Assert.assertFalse(put.has(FAMILY_01,FAMILY_01,TS,VALUE_01)); }

InternalCallVerifier BooleanVerifier 
// has(family, qualifier, value) ignores the timestamp; swapping qualifier and
// value must not match.
@Test public void testHasIgnoreTS(){ Assert.assertTrue(put.has(FAMILY_01,QUALIFIER_01,VALUE_01)); Assert.assertFalse(put.has(FAMILY_01,VALUE_01,QUALIFIER_01)); }

InternalCallVerifier BooleanVerifier 
// has(family, qualifier, ts) ignores the value; a different timestamp must not match.
@Test public void testHasIgnoreValue(){ Assert.assertTrue(put.has(FAMILY_01,QUALIFIER_01,TS)); Assert.assertFalse(put.has(FAMILY_01,QUALIFIER_01,TS + 1)); }

Class: org.apache.hadoop.hbase.client.TestPutWithDelete

InternalCallVerifier BooleanVerifier 
// A Put may carry a DeleteColumn marker KeyValue alongside ordinary columns:
// after the second put, columns A/B/D are updated and C is deleted in the same
// row mutation. The table is closed in finally regardless of assertion outcome.
@Test public void testHbasePutDeleteCell() throws Exception { final TableName tableName=TableName.valueOf("TestPutWithDelete"); final byte[] rowKey=Bytes.toBytes("12345"); final byte[] family=Bytes.toBytes("cf"); Table table=TEST_UTIL.createTable(tableName,family); TEST_UTIL.waitTableAvailable(tableName.getName(),5000); try { Put put=new Put(rowKey); put.addColumn(family,Bytes.toBytes("A"),Bytes.toBytes("a")); put.addColumn(family,Bytes.toBytes("B"),Bytes.toBytes("b")); put.addColumn(family,Bytes.toBytes("C"),Bytes.toBytes("c")); put.addColumn(family,Bytes.toBytes("D"),Bytes.toBytes("d")); table.put(put); Get get=new Get(rowKey); Result result=table.get(get); assertTrue("Column A value should be a",Bytes.toString(result.getValue(family,Bytes.toBytes("A"))).equals("a")); assertTrue("Column B value should be b",Bytes.toString(result.getValue(family,Bytes.toBytes("B"))).equals("b")); assertTrue("Column C value should be c",Bytes.toString(result.getValue(family,Bytes.toBytes("C"))).equals("c")); assertTrue("Column D value should be d",Bytes.toString(result.getValue(family,Bytes.toBytes("D"))).equals("d")); put=new Put(rowKey); put.addColumn(family,Bytes.toBytes("A"),Bytes.toBytes("a1")); put.addColumn(family,Bytes.toBytes("B"),Bytes.toBytes("b1")); KeyValue marker=new KeyValue(rowKey,family,Bytes.toBytes("C"),HConstants.LATEST_TIMESTAMP,KeyValue.Type.DeleteColumn); put.addColumn(family,Bytes.toBytes("D"),Bytes.toBytes("d1")); put.add(marker); table.put(put); get=new Get(rowKey); result=table.get(get); assertTrue("Column A value should be a1",Bytes.toString(result.getValue(family,Bytes.toBytes("A"))).equals("a1")); assertTrue("Column B value should be b1",Bytes.toString(result.getValue(family,Bytes.toBytes("B"))).equals("b1")); assertTrue("Column C should not exist",result.getValue(family,Bytes.toBytes("C")) == null); assertTrue("Column D value should be d1",Bytes.toString(result.getValue(family,Bytes.toBytes("D"))).equals("d1")); } finally { table.close(); } }

Class: org.apache.hadoop.hbase.client.TestReplicaWithCluster

InternalCallVerifier BooleanVerifier 
// Create/delete a replicated table guarded by SlowMeCopro: a default-consistency
// get must be fresh, and with the primary latched a TIMELINE get must be served
// stale by the replica. The latch is counted down in both try and finally — the
// second countDown on an already-open latch is a harmless safety net.
@Test(timeout=30000) public void testCreateDeleteTable() throws IOException { HTableDescriptor hdt=HTU.createTableDescriptor("testCreateDeleteTable"); hdt.setRegionReplication(NB_SERVERS); hdt.addCoprocessor(SlowMeCopro.class.getName()); Table table=HTU.createTable(hdt,new byte[][]{f},null); Put p=new Put(row); p.addColumn(f,row,row); table.put(p); Get g=new Get(row); Result r=table.get(g); Assert.assertFalse(r.isStale()); try { SlowMeCopro.cdl.set(new CountDownLatch(1)); g=new Get(row); g.setConsistency(Consistency.TIMELINE); r=table.get(g); Assert.assertTrue(r.isStale()); SlowMeCopro.cdl.get().countDown(); } finally { SlowMeCopro.cdl.get().countDown(); SlowMeCopro.sleepTime.set(0); } HTU.getHBaseAdmin().disableTable(hdt.getTableName()); HTU.deleteTable(hdt.getTableName()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Schema change on a replicated table: add a column family while disabled,
// verify the descriptor grew by exactly one family (checked twice, via two
// Admin handles), write into the new family, then re-check fresh vs. stale
// (latched TIMELINE) reads before cleaning the table up.
@Test(timeout=120000) public void testChangeTable() throws Exception { HTableDescriptor hdt=HTU.createTableDescriptor("testChangeTable"); hdt.setRegionReplication(NB_SERVERS); hdt.addCoprocessor(SlowMeCopro.class.getName()); Table table=HTU.createTable(hdt,new byte[][]{f},null); Put p=new Put(row); p.addColumn(f,row,row); table.put(p); Get g=new Get(row); Result r=table.get(g); Assert.assertFalse(r.isStale()); HTableDescriptor bHdt=HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName()); HColumnDescriptor hcd=new HColumnDescriptor(row); hdt.addFamily(hcd); HTU.getHBaseAdmin().disableTable(hdt.getTableName()); HTU.getHBaseAdmin().modifyTable(hdt.getTableName(),hdt); HTU.getHBaseAdmin().enableTable(hdt.getTableName()); HTableDescriptor nHdt=HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName()); Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()),bHdt.getColumnFamilies().length + 1,nHdt.getColumnFamilies().length); p=new Put(row); p.addColumn(row,row,row); table.put(p); g=new Get(row); r=table.get(g); Assert.assertFalse(r.isStale()); try { SlowMeCopro.cdl.set(new CountDownLatch(1)); g=new Get(row); g.setConsistency(Consistency.TIMELINE); r=table.get(g); Assert.assertTrue(r.isStale()); } finally { SlowMeCopro.cdl.get().countDown(); SlowMeCopro.sleepTime.set(0); } Admin admin=HTU.getHBaseAdmin(); nHdt=admin.getTableDescriptor(hdt.getTableName()); Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()),bHdt.getColumnFamilies().length + 1,nHdt.getColumnFamilies().length); admin.disableTable(hdt.getTableName()); admin.deleteTable(hdt.getTableName()); admin.close(); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
@Test(timeout=30000) public void testBulkLoad() throws IOException { LOG.debug("Creating test table"); HTableDescriptor hdt=HTU.createTableDescriptor("testBulkLoad"); hdt.setRegionReplication(NB_SERVERS); hdt.addCoprocessor(SlowMeCopro.class.getName()); Table table=HTU.createTable(hdt,new byte[][]{f},null); LOG.debug("Creating test data"); Path dir=HTU.getDataTestDirOnTestFS("testBulkLoad"); final int numRows=10; final byte[] qual=Bytes.toBytes("qual"); final byte[] val=Bytes.toBytes("val"); final List> famPaths=new ArrayList>(); for ( HColumnDescriptor col : hdt.getColumnFamilies()) { Path hfile=new Path(dir,col.getNameAsString()); TestHRegionServerBulkLoad.createHFile(HTU.getTestFileSystem(),hfile,col.getName(),qual,val,numRows); famPaths.add(new Pair(col.getName(),hfile.toString())); } LOG.debug("Loading test data"); @SuppressWarnings("deprecation") final HConnection conn=HTU.getHBaseAdmin().getConnection(); RegionServerCallable callable=new RegionServerCallable(conn,hdt.getTableName(),TestHRegionServerBulkLoad.rowkey(0)){ @Override public Void call( int timeout) throws Exception { LOG.debug("Going to connect to server " + getLocation() + " for row "+ Bytes.toStringBinary(getRow())); byte[] regionName=getLocation().getRegionInfo().getRegionName(); BulkLoadHFileRequest request=RequestConverter.buildBulkLoadHFileRequest(famPaths,regionName,true); getStub().bulkLoadHFile(null,request); return null; } } ; RpcRetryingCallerFactory factory=new RpcRetryingCallerFactory(HTU.getConfiguration()); RpcRetryingCaller caller=factory.newCaller(); caller.callWithRetries(callable,10000); LOG.debug("Verifying data load"); for (int i=0; i < numRows; i++) { byte[] row=TestHRegionServerBulkLoad.rowkey(i); Get g=new Get(row); Result r=table.get(g); Assert.assertFalse(r.isStale()); } LOG.debug("Verifying replica queries"); try { SlowMeCopro.cdl.set(new CountDownLatch(1)); for (int i=0; i < numRows; i++) { byte[] row=TestHRegionServerBulkLoad.rowkey(i); Get g=new Get(row); 
g.setConsistency(Consistency.TIMELINE); Result r=table.get(g); Assert.assertTrue(r.isStale()); } SlowMeCopro.cdl.get().countDown(); } finally { SlowMeCopro.cdl.get().countDown(); SlowMeCopro.sleepTime.set(0); } HTU.getHBaseAdmin().disableTable(hdt.getTableName()); HTU.deleteTable(hdt.getTableName()); }

Class: org.apache.hadoop.hbase.client.TestReplicasClient

InternalCallVerifier BooleanVerifier 
// End-to-end staleness matrix for region replicas: fresh get, slow-primary get
// (still fresh), latched TIMELINE get (stale and empty before flush), existence
// checks, then the same checks again after flushing primary and secondary and
// waiting past the replica refresh period. Cleanup in finally deletes the row
// and closes the secondary region.
@Test public void testUseRegionWithReplica() throws Exception { byte[] b1="testUseRegionWithReplica".getBytes(); openRegion(hriSecondary); try { Put p=new Put(b1); p.addColumn(f,b1,b1); table.put(p); LOG.info("Put done"); Get g=new Get(b1); Result r=table.get(g); Assert.assertFalse(r.isStale()); Assert.assertFalse(r.getColumnCells(f,b1).isEmpty()); LOG.info("get works and is not stale done"); SlowMeCopro.sleepTime.set(2000); g=new Get(b1); r=table.get(g); Assert.assertFalse(r.isStale()); Assert.assertFalse(r.getColumnCells(f,b1).isEmpty()); SlowMeCopro.sleepTime.set(0); LOG.info("sleep and is not stale done"); SlowMeCopro.getCdl().set(new CountDownLatch(1)); g=new Get(b1); g.setConsistency(Consistency.TIMELINE); r=table.get(g); Assert.assertTrue(r.isStale()); Assert.assertTrue(r.getColumnCells(f,b1).isEmpty()); SlowMeCopro.getCdl().get().countDown(); LOG.info("stale done"); g=new Get(b1); g.setCheckExistenceOnly(true); r=table.get(g); Assert.assertFalse(r.isStale()); Assert.assertTrue(r.getExists()); LOG.info("exists not stale done"); SlowMeCopro.getCdl().set(new CountDownLatch(1)); g=new Get(b1); g.setCheckExistenceOnly(true); g.setConsistency(Consistency.TIMELINE); r=table.get(g); Assert.assertTrue(r.isStale()); Assert.assertFalse("The secondary has stale data",r.getExists()); SlowMeCopro.getCdl().get().countDown(); LOG.info("exists stale before flush done"); flushRegion(hriPrimary); flushRegion(hriSecondary); LOG.info("flush done"); Thread.sleep(1000 + REFRESH_PERIOD * 2); SlowMeCopro.getCdl().set(new CountDownLatch(1)); g=new Get(b1); g.setConsistency(Consistency.TIMELINE); r=table.get(g); Assert.assertTrue(r.isStale()); Assert.assertFalse(r.isEmpty()); SlowMeCopro.getCdl().get().countDown(); LOG.info("stale done"); SlowMeCopro.getCdl().set(new CountDownLatch(1)); g=new Get(b1); g.setCheckExistenceOnly(true); g.setConsistency(Consistency.TIMELINE); r=table.get(g); Assert.assertTrue(r.isStale()); Assert.assertTrue(r.getExists());
// After the flush the replica has the data, so the stale existence check is now true.
SlowMeCopro.getCdl().get().countDown(); LOG.info("exists stale after flush done"); } finally { SlowMeCopro.getCdl().get().countDown(); SlowMeCopro.sleepTime.set(0); Delete d=new Delete(b1); table.delete(d); closeRegion(hriSecondary); } }

InternalCallVerifier BooleanVerifier 
/** A slowed-down primary alone (no TIMELINE consistency requested) must still yield a non-stale result. */
@Test public void testGetNoResultNotStaleSleepRegionWithReplica() throws Exception {
  byte[] rowKey="testGetNoResultNotStaleSleepRegionWithReplica".getBytes();
  openRegion(hriSecondary);
  try {
    SlowMeCopro.sleepTime.set(2000);
    Result result=table.get(new Get(rowKey));
    Assert.assertFalse(result.isStale());
  } finally {
    SlowMeCopro.sleepTime.set(0);
    closeRegion(hriSecondary);
  }
}

InternalCallVerifier EqualityVerifier 
// Region location lookup must consistently report two locations (primary plus
// secondary replica) whether the lookup is cached, forced to reload, or issued
// right after clearing the region cache.
@Test public void testLocations() throws Exception { byte[] b1="testLocations".getBytes(); openRegion(hriSecondary); ClusterConnection hc=(ClusterConnection)HTU.getHBaseAdmin().getConnection(); try { hc.clearRegionCache(); RegionLocations rl=hc.locateRegion(table.getName(),b1,false,false); Assert.assertEquals(2,rl.size()); rl=hc.locateRegion(table.getName(),b1,true,false); Assert.assertEquals(2,rl.size()); hc.clearRegionCache(); rl=hc.locateRegion(table.getName(),b1,true,false); Assert.assertEquals(2,rl.size()); rl=hc.locateRegion(table.getName(),b1,false,false); Assert.assertEquals(2,rl.size()); } finally { closeRegion(hriSecondary); } }

InternalCallVerifier BooleanVerifier 
// With next() slowed down on the primary, a TIMELINE scan must cancel the
// in-flight RPC and fail over; ClientScanner.isAnyRPCcancelled() proves the
// cancellation happened. Cleanup in finally resets all SlowMeCopro knobs and
// deletes the written rows.
// NOTE(review): raw Iterator — generics appear stripped by extraction; left as-is.
@Test public void testCancelOfScan() throws Exception { openRegion(hriSecondary); int NUMROWS=100; try { for (int i=0; i < NUMROWS; i++) { byte[] b1=Bytes.toBytes("testUseRegionWithReplica" + i); Put p=new Put(b1); p.addColumn(f,b1,b1); table.put(p); } LOG.debug("PUT done"); int caching=20; byte[] start; start=Bytes.toBytes("testUseRegionWithReplica" + 0); flushRegion(hriPrimary); LOG.info("flush done"); Thread.sleep(1000 + REFRESH_PERIOD * 2); SlowMeCopro.slowDownNext.set(true); SlowMeCopro.countOfNext.set(0); SlowMeCopro.sleepTime.set(5000); Scan scan=new Scan(start); scan.setCaching(caching); scan.setConsistency(Consistency.TIMELINE); ResultScanner scanner=table.getScanner(scan); Iterator iter=scanner.iterator(); iter.next(); Assert.assertTrue(((ClientScanner)scanner).isAnyRPCcancelled()); SlowMeCopro.slowDownNext.set(false); SlowMeCopro.countOfNext.set(0); } finally { SlowMeCopro.getCdl().get().countDown(); SlowMeCopro.sleepTime.set(0); SlowMeCopro.slowDownNext.set(false); SlowMeCopro.countOfNext.set(0); for (int i=0; i < NUMROWS; i++) { byte[] b1=Bytes.toBytes("testUseRegionWithReplica" + i); Delete d=new Delete(b1); table.delete(d); } closeRegion(hriSecondary); } }

InternalCallVerifier BooleanVerifier 
/** A plain get on a missing row must not be flagged stale when a replica region is open. */
@Test public void testGetNoResultNoStaleRegionWithReplica() throws Exception {
  byte[] rowKey="testGetNoResultNoStaleRegionWithReplica".getBytes();
  openRegion(hriSecondary);
  try {
    Result result=table.get(new Get(rowKey));
    Assert.assertFalse(result.isStale());
  } finally {
    closeRegion(hriSecondary);
  }
}

InternalCallVerifier BooleanVerifier 
/** With the replica latch already open (count 0), a default-consistency get is served fresh. */
@Test public void testUseRegionWithoutReplica() throws Exception {
  byte[] rowKey="testUseRegionWithoutReplica".getBytes();
  openRegion(hriSecondary);
  SlowMeCopro.getCdl().set(new CountDownLatch(0));
  try {
    Result result=table.get(new Get(rowKey));
    Assert.assertFalse(result.isStale());
  } finally {
    closeRegion(hriSecondary);
  }
}

InternalCallVerifier BooleanVerifier 
/**
 * Submits two TIMELINE existence gets through the AsyncProcess while the
 * primary is latched: both results must be stale and exist, and the in-progress
 * primary calls must have been cancelled once the replica answered.
 */
@Test public void testCancelOfMultiGet() throws Exception {
  openRegion(hriSecondary);
  try {
    // Restored type arguments (List<Put>, List<Get>, Set<MultiServerCallable<Row>>):
    // the extracted source had them stripped ("Set> set"), which does not compile.
    List<Put> puts=new ArrayList<Put>(2);
    byte[] b1=Bytes.toBytes("testCancelOfMultiGet" + 0);
    Put p=new Put(b1);
    p.addColumn(f,b1,b1);
    puts.add(p);
    byte[] b2=Bytes.toBytes("testCancelOfMultiGet" + 1);
    p=new Put(b2);
    p.addColumn(f,b2,b2);
    puts.add(p);
    table.put(puts);
    LOG.debug("PUT done");
    flushRegion(hriPrimary);
    LOG.info("flush done");
    // Wait past the replica refresh period so the secondary sees the data.
    Thread.sleep(1000 + REFRESH_PERIOD * 2);
    AsyncProcess ap=((ClusterConnection)HTU.getConnection()).getAsyncProcess();
    SlowMeCopro.getCdl().set(new CountDownLatch(1));
    List<Get> gets=new ArrayList<Get>();
    Get g=new Get(b1);
    g.setCheckExistenceOnly(true);
    g.setConsistency(Consistency.TIMELINE);
    gets.add(g);
    g=new Get(b2);
    g.setCheckExistenceOnly(true);
    g.setConsistency(Consistency.TIMELINE);
    gets.add(g);
    Object[] results=new Object[2];
    AsyncRequestFuture reqs=ap.submitAll(HTable.getDefaultExecutor(HTU.getConfiguration()),table.getName(),gets,null,results);
    reqs.waitUntilDone();
    for (Object r : results) {
      Assert.assertTrue(((Result)r).isStale());
      Assert.assertTrue(((Result)r).getExists());
    }
    Set<MultiServerCallable<Row>> set=((AsyncRequestFutureImpl<?>)reqs).getCallsInProgress();
    Assert.assertTrue(!set.isEmpty());
    for (MultiServerCallable<Row> m : set) {
      Assert.assertTrue(m.isCancelled());
    }
  } finally {
    SlowMeCopro.getCdl().get().countDown();
    SlowMeCopro.sleepTime.set(0);
    SlowMeCopro.slowDownNext.set(false);
    SlowMeCopro.countOfNext.set(0);
    for (int i=0; i < 2; i++) {
      byte[] b1=Bytes.toBytes("testCancelOfMultiGet" + i);
      Delete d=new Delete(b1);
      table.delete(d);
    }
    closeRegion(hriSecondary);
  }
}

InternalCallVerifier BooleanVerifier 
/** With the primary latched, a TIMELINE get falls back to the replica and is flagged stale. */
@Test public void testGetNoResultStaleRegionWithReplica() throws Exception {
  byte[] rowKey="testGetNoResultStaleRegionWithReplica".getBytes();
  openRegion(hriSecondary);
  SlowMeCopro.getCdl().set(new CountDownLatch(1));
  try {
    Get get=new Get(rowKey);
    get.setConsistency(Consistency.TIMELINE);
    Result result=table.get(get);
    Assert.assertTrue(result.isStale());
  } finally {
    SlowMeCopro.getCdl().get().countDown();
    closeRegion(hriSecondary);
  }
}

Class: org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClient

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Cloning a corrupted snapshot must fail with CorruptedSnapshotException and
 * must not leave a partially-created clone table behind.
 */
@Test public void testCorruptedSnapshot() throws IOException, InterruptedException {
  SnapshotTestingUtils.corruptSnapshot(TEST_UTIL,Bytes.toString(snapshotName0));
  TableName cloneTable=TableName.valueOf("corruptedClone-" + System.currentTimeMillis());
  try {
    admin.cloneSnapshot(snapshotName0,cloneTable);
    fail("Expected CorruptedSnapshotException, got succeeded cloneSnapshot()");
  } catch (CorruptedSnapshotException e) {
    // Expected path: the failed clone must not exist.
    assertFalse(admin.tableExists(cloneTable));
  } catch (Exception e) {
    fail("Expected CorruptedSnapshotException got: " + e);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Snapshot/restore across a schema change: starting from snapshot0 (1 family),
// add TEST_FAMILY2, load 500 rows into it, take snapshot2 (2 families), then
// restore each snapshot in turn. Each restore must bring back the matching
// descriptor (family count and on-disk family directories) and row counts, and
// reading the removed family after restoring snapshot0 must throw
// NoSuchColumnFamilyException.
// NOTE(review): raw Set — generics appear stripped by extraction; left as-is.
@Test public void testRestoreSchemaChange() throws Exception { Table table=TEST_UTIL.getConnection().getTable(tableName); admin.disableTable(tableName); admin.addColumnFamily(tableName,getTestRestoreSchemaChangeHCD()); admin.enableTable(tableName); assertEquals(2,table.getTableDescriptor().getFamilies().size()); HTableDescriptor htd=admin.getTableDescriptor(tableName); assertEquals(2,htd.getFamilies().size()); SnapshotTestingUtils.loadData(TEST_UTIL,tableName,500,TEST_FAMILY2); long snapshot2Rows=snapshot1Rows + 500; assertEquals(snapshot2Rows,countRows(table)); assertEquals(500,countRows(table,TEST_FAMILY2)); Set fsFamilies=getFamiliesFromFS(tableName); assertEquals(2,fsFamilies.size()); admin.disableTable(tableName); admin.snapshot(snapshotName2,tableName); admin.restoreSnapshot(snapshotName0); admin.enableTable(tableName); assertEquals(1,table.getTableDescriptor().getFamilies().size()); try { countRows(table,TEST_FAMILY2); fail("family '" + Bytes.toString(TEST_FAMILY2) + "' should not exists"); } catch ( NoSuchColumnFamilyException e) { } assertEquals(snapshot0Rows,countRows(table)); htd=admin.getTableDescriptor(tableName); assertEquals(1,htd.getFamilies().size()); fsFamilies=getFamiliesFromFS(tableName); assertEquals(1,fsFamilies.size()); admin.disableTable(tableName); admin.restoreSnapshot(snapshotName2); admin.enableTable(tableName); htd=admin.getTableDescriptor(tableName); assertEquals(2,htd.getFamilies().size()); assertEquals(2,table.getTableDescriptor().getFamilies().size()); assertEquals(500,countRows(table,TEST_FAMILY2)); assertEquals(snapshot2Rows,countRows(table)); fsFamilies=getFamiliesFromFS(tableName); assertEquals(2,fsFamilies.size()); table.close(); }

Class: org.apache.hadoop.hbase.client.TestRpcControllerFactory

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * When the configured custom controller class cannot be loaded, instantiate()
 * must fall back to the plain RpcControllerFactory rather than failing.
 */
@Test public void testFallbackToDefaultRpcControllerFactory(){
  Configuration conf=new Configuration(UTIL.getConfiguration());
  // "foo.bar.Baz" is deliberately an unloadable class name.
  conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,"foo.bar.Baz");
  RpcControllerFactory factory=RpcControllerFactory.instantiate(conf);
  assertNotNull(factory);
  // JUnit convention is assertEquals(expected, actual); the original had the
  // arguments swapped, which produces a misleading message on failure.
  assertEquals(RpcControllerFactory.class,factory.getClass());
}

Class: org.apache.hadoop.hbase.client.TestScan

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Attribute-map contract on Scan: setting an absent attribute to null is a
// no-op, setting a present attribute to null removes it, overwriting replaces
// the value, and getAttributesMap().size() tracks all of this exactly.
@Test public void testScanAttributes(){ Scan scan=new Scan(); Assert.assertTrue(scan.getAttributesMap().isEmpty()); Assert.assertNull(scan.getAttribute("absent")); scan.setAttribute("absent",null); Assert.assertTrue(scan.getAttributesMap().isEmpty()); Assert.assertNull(scan.getAttribute("absent")); scan.setAttribute("attribute1",Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"),scan.getAttribute("attribute1"))); Assert.assertEquals(1,scan.getAttributesMap().size()); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"),scan.getAttributesMap().get("attribute1"))); scan.setAttribute("attribute1",Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"),scan.getAttribute("attribute1"))); Assert.assertEquals(1,scan.getAttributesMap().size()); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"),scan.getAttributesMap().get("attribute1"))); scan.setAttribute("attribute2",Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"),scan.getAttribute("attribute2"))); Assert.assertEquals(2,scan.getAttributesMap().size()); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"),scan.getAttributesMap().get("attribute2"))); scan.setAttribute("attribute2",null); Assert.assertNull(scan.getAttribute("attribute2")); Assert.assertEquals(1,scan.getAttributesMap().size()); Assert.assertNull(scan.getAttributesMap().get("attribute2")); scan.setAttribute("attribute2",null); Assert.assertNull(scan.getAttribute("attribute2")); Assert.assertEquals(1,scan.getAttributesMap().size()); Assert.assertNull(scan.getAttributesMap().get("attribute2")); scan.setAttribute("attribute1",null); Assert.assertNull(scan.getAttribute("attribute1")); Assert.assertTrue(scan.getAttributesMap().isEmpty()); Assert.assertNull(scan.getAttributesMap().get("attribute1")); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** Adding a column with a null qualifier must still record one entry for the family. */
@Test public void testNullQualifier(){
  Scan scan=new Scan();
  byte[] family=Bytes.toBytes("family");
  scan.addColumn(family,null);
  // Parameterized (was raw Set) to avoid unchecked-warning noise.
  Set<byte[]> qualifiers=scan.getFamilyMap().get(family);
  Assert.assertEquals(1,qualifiers.size());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a Scan carrying three attributes through its protobuf
 * representation and checks that every attribute survives unchanged and that
 * no extra entries appear.
 */
@Test
public void testAttributesSerialization() throws IOException {
  Scan original = new Scan();
  original.setAttribute("attribute1", Bytes.toBytes("value1"));
  original.setAttribute("attribute2", Bytes.toBytes("value2"));
  original.setAttribute("attribute3", Bytes.toBytes("value3"));

  // Serialize to protobuf and back.
  ClientProtos.Scan proto = ProtobufUtil.toScan(original);
  Scan roundTripped = ProtobufUtil.toScan(proto);

  Assert.assertNull(roundTripped.getAttribute("absent"));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), roundTripped.getAttribute("attribute1")));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), roundTripped.getAttribute("attribute2")));
  Assert.assertTrue(Arrays.equals(Bytes.toBytes("value3"), roundTripped.getAttribute("attribute3")));
  // Exactly the three attributes we set, nothing more.
  Assert.assertEquals(3, roundTripped.getAttributesMap().size());
}

Class: org.apache.hadoop.hbase.client.TestScannerTimeout

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test that the scanner can continue even if the region server it was reading
 * from failed. Before HBASE-2772 it reused the same scanner id after the
 * failover.
 *
 * Fix: the connection, table and scanner were closed only on the success
 * path, leaking all three whenever the assertion failed; try-with-resources
 * now guarantees cleanup in every outcome.
 *
 * @throws Exception on any cluster/client failure
 */
@Test(timeout = 300000)
public void test2772() throws Exception {
  LOG.info("START************ test2772");
  HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
  Scan scan = new Scan();
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  // Raise the client scanner timeout so the retry after the server abort does
  // not time out before the scan can resume on another server.
  conf.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, SCANNER_TIMEOUT * 100);
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table higherScanTimeoutTable = connection.getTable(TABLE_NAME);
       ResultScanner r = higherScanTimeoutTable.getScanner(scan)) {
    // Kill the server hosting the region mid-scan; the client must recover.
    rs.abort("die!");
    Result[] results = r.next(NB_ROWS);
    assertEquals(NB_ROWS, results.length);
  }
  LOG.info("END ************ test2772");
}

Class: org.apache.hadoop.hbase.client.TestScannersFromClientSide

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Test from client side for async scan: writes a grid of cells, scans with
 * async prefetch enabled, verifies every cell comes back and that the scanner
 * really is a ClientAsyncPrefetchScanner.
 *
 * Fixes: generic type arguments (stripped in this copy) restored on the cell
 * lists, and the scanner is now closed via try-with-resources instead of
 * leaking; the table is closed at the end.
 *
 * @throws Exception on any cluster/client failure
 */
@Test
public void testAsyncScanner() throws Exception {
  TableName TABLE = TableName.valueOf("testAsyncScan");
  byte[][] ROWS = HTestConst.makeNAscii(ROW, 2);
  byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3);
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 10);
  Table ht = TEST_UTIL.createTable(TABLE, FAMILIES);
  boolean toLog = true;
  // Expected cells: every (row, family, qualifier) combination written below.
  List<Cell> kvListExp = new ArrayList<>();
  for (int r = 0; r < ROWS.length; r++) {
    Put put = new Put(ROWS[r]);
    for (int c = 0; c < FAMILIES.length; c++) {
      for (int q = 0; q < QUALIFIERS.length; q++) {
        KeyValue kv = new KeyValue(ROWS[r], FAMILIES[c], QUALIFIERS[q], 1, VALUE);
        put.add(kv);
        kvListExp.add(kv);
      }
    }
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.setAsyncPrefetch(true);
  List<Cell> kvListScan = new ArrayList<>();
  Result result;
  try (ResultScanner scanner = ht.getScanner(scan)) {
    while ((result = scanner.next()) != null) {
      for (Cell kv : result.listCells()) {
        kvListScan.add(kv);
      }
    }
    assertTrue("Not instance of async scanner", scanner instanceof ClientAsyncPrefetchScanner);
  }
  result = Result.create(kvListScan);
  verifyResult(result, kvListExp, toLog, "Testing async scan");
  ht.close();
}

InternalCallVerifier BooleanVerifier 
/**
 * Verifies that when no max result size is set on the Scan, the client-side
 * default (hbase.client.scanner.max.result.size) bounds caching: after one
 * next() the client scanner's cache holds at most one additional result.
 *
 * Fixes: generic type argument (stripped in this copy) restored on the Put
 * list, and the scanner is now closed via try-with-resources.
 */
@Test
public void testMaxResultSizeIsSetToDefault() throws Exception {
  TableName TABLE = TableName.valueOf("testMaxResultSizeIsSetToDefault");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  long expectedMaxResultSize = TEST_UTIL.getConfiguration().getLong(
      HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
      HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
  int numRows = 5;
  byte[][] ROWS = HTestConst.makeNAscii(ROW, numRows);
  int numQualifiers = 10;
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, numQualifiers);
  // Size each cell so one row's worth of cells just exceeds the default max
  // result size, forcing the result-size limit to kick in per row.
  int cellSize = (int) (expectedMaxResultSize / (numQualifiers - 1));
  byte[] cellValue = Bytes.createMaxByteArray(cellSize);
  List<Put> puts = new ArrayList<>();
  for (int row = 0; row < ROWS.length; row++) {
    Put put = new Put(ROWS[row]);
    for (int qual = 0; qual < QUALIFIERS.length; qual++) {
      KeyValue kv = new KeyValue(ROWS[row], FAMILY, QUALIFIERS[qual], cellValue);
      put.add(kv);
    }
    puts.add(put);
  }
  ht.put(puts);
  Scan scan = new Scan();
  try (ResultScanner scanner = ht.getScanner(scan)) {
    assertTrue(scanner instanceof ClientScanner);
    ClientScanner clientScanner = (ClientScanner) scanner;
    scanner.next();
    // The default max result size should keep the prefetched cache to <= 1.
    assertTrue("The cache contains: " + clientScanner.getCacheSize() + " results",
        clientScanner.getCacheSize() <= 1);
  }
}

Class: org.apache.hadoop.hbase.client.TestShortCircuitConnection

InternalCallVerifier BooleanVerifier 
/**
 * Verifies the region server's short-circuited cluster connection: the admin
 * and client stubs obtained for the local server's own ServerName are served
 * directly by RSRpcServices (asserted via instanceof, i.e. no remote RPC
 * stub), while stubs requested for any other ServerName are NOT RSRpcServices
 * instances. Also identity-compares (==) the connection handed back by the
 * table and by connection.getAdmin() to confirm the same short-circuit
 * connection object is reused throughout.
 */
@Test @SuppressWarnings("deprecation") public void testShortCircuitConnection() throws IOException, InterruptedException { TableName tn=TableName.valueOf("testShortCircuitConnection"); HTableDescriptor htd=UTIL.createTableDescriptor(tn); HColumnDescriptor hcd=new HColumnDescriptor(Bytes.toBytes("cf")); htd.addFamily(hcd); UTIL.createTable(htd,null); HRegionServer regionServer=UTIL.getRSForFirstRegionInTable(tn); ClusterConnection connection=regionServer.getClusterConnection(); HTableInterface tableIf=connection.getTable(tn); assertTrue(tableIf instanceof HTable); HTable table=(HTable)tableIf; assertTrue(table.getConnection() == connection); AdminService.BlockingInterface admin=connection.getAdmin(regionServer.getServerName()); ClientService.BlockingInterface client=connection.getClient(regionServer.getServerName()); assertTrue(admin instanceof RSRpcServices); assertTrue(client instanceof RSRpcServices); ServerName anotherSn=ServerName.valueOf(regionServer.getServerName().getHostAndPort(),EnvironmentEdgeManager.currentTime()); admin=connection.getAdmin(anotherSn); client=connection.getClient(anotherSn); assertFalse(admin instanceof RSRpcServices); assertFalse(client instanceof RSRpcServices); assertTrue(connection.getAdmin().getConnection() == connection); }

Class: org.apache.hadoop.hbase.client.TestSizeFailures

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Basic client-side validation of HBASE-13262: a full-table scan with
 * unlimited max result size and batch must still observe every row and every
 * cell.
 *
 * Fix: the generic type arguments on {@code Entry} were stripped in this
 * copy; without {@code Entry<Long, Long>} the unboxing to long below does not
 * compile.
 */
@Test
public void testScannerSeesAllRecords() throws Exception {
  Connection conn = TEST_UTIL.getConnection();
  try (Table table = conn.getTable(TABLENAME)) {
    Scan s = new Scan();
    s.addFamily(FAMILY);
    // Unlimited size/batch: the server must still chunk correctly.
    s.setMaxResultSize(-1);
    s.setBatch(-1);
    s.setCaching(500);
    Entry<Long, Long> entry = sumTable(table.getScanner(s));
    long rowsObserved = entry.getKey();
    long entriesObserved = entry.getValue();
    assertEquals(NUM_ROWS, rowsObserved);
    assertEquals(NUM_ROWS * NUM_COLS, entriesObserved);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Basic client-side validation of HBASE-13262 for the "small" scan code path:
 * with small-scan enabled and unlimited max result size and batch, the scan
 * must still observe every row and every cell.
 *
 * Fix: the generic type arguments on {@code Entry} were stripped in this
 * copy; without {@code Entry<Long, Long>} the unboxing to long below does not
 * compile.
 */
@Test
public void testSmallScannerSeesAllRecords() throws Exception {
  Connection conn = TEST_UTIL.getConnection();
  try (Table table = conn.getTable(TABLENAME)) {
    Scan s = new Scan();
    // Exercise the small-scan RPC path specifically.
    s.setSmall(true);
    s.addFamily(FAMILY);
    s.setMaxResultSize(-1);
    s.setBatch(-1);
    s.setCaching(500);
    Entry<Long, Long> entry = sumTable(table.getScanner(s));
    long rowsObserved = entry.getKey();
    long entriesObserved = entry.getValue();
    assertEquals(NUM_ROWS, rowsObserved);
    assertEquals(NUM_ROWS * NUM_COLS, entriesObserved);
  }
}

Class: org.apache.hadoop.hbase.client.TestSnapshotFromClient

InternalCallVerifier EqualityVerifier 
/**
 * Test HBaseAdmin#deleteSnapshots(String), which deletes all snapshots whose
 * names match the regex parameter: the two "TableSnapshot*" snapshots are
 * deleted while "3rdTableSnapshot" must survive.
 *
 * Fixes: assertEquals on the surviving snapshot name had its (actual,
 * expected) arguments swapped, producing a misleading failure message; the
 * generic type argument (stripped in this copy) restored on the list.
 *
 * @throws Exception on any cluster/client failure
 */
@Test(timeout = 300000)
public void testSnapshotDeletionWithRegex() throws Exception {
  Admin admin = UTIL.getHBaseAdmin();
  SnapshotTestingUtils.assertNoSnapshots(admin);
  // Load some data so the snapshots are non-trivial.
  Table table = UTIL.getConnection().getTable(TABLE_NAME);
  UTIL.loadTable(table, TEST_FAM);
  table.close();
  byte[] snapshot1 = Bytes.toBytes("TableSnapshot1");
  admin.snapshot(snapshot1, TABLE_NAME);
  LOG.debug("Snapshot1 completed.");
  byte[] snapshot2 = Bytes.toBytes("TableSnapshot2");
  admin.snapshot(snapshot2, TABLE_NAME);
  LOG.debug("Snapshot2 completed.");
  String snapshot3 = "3rdTableSnapshot";
  admin.snapshot(Bytes.toBytes(snapshot3), TABLE_NAME);
  LOG.debug(snapshot3 + " completed.");
  // Delete the two snapshots whose names match, leaving only the third.
  admin.deleteSnapshots("TableSnapshot.*");
  List<SnapshotDescription> snapshots = admin.listSnapshots();
  assertEquals(1, snapshots.size());
  assertEquals(snapshot3, snapshots.get(0).getName());
  admin.deleteSnapshot(snapshot3);
  admin.close();
}

InternalCallVerifier EqualityVerifier 
/**
 * Admin#deleteTableSnapshots(tableRegex, snapshotRegex) must delete only the
 * snapshots matching BOTH patterns: of the three snapshots taken on "test*"
 * tables, deleting "Table1.*" leaves exactly one behind. Any remaining
 * snapshots are cleaned up best-effort in the finally block.
 */
@Test(timeout = 300000)
public void testDeleteTableSnapshotsWithRegex() throws Exception {
  Admin admin = null;
  try {
    admin = UTIL.getHBaseAdmin();
    String firstSnapshot = "Table1Snapshot1";
    admin.snapshot(firstSnapshot, TABLE_NAME);
    LOG.debug("Snapshot1 completed.");
    String secondSnapshot = "Table1Snapshot2";
    admin.snapshot(secondSnapshot, TABLE_NAME);
    LOG.debug("Snapshot2 completed.");
    String thirdSnapshot = "Table2Snapshot1";
    admin.snapshot(Bytes.toBytes(thirdSnapshot), TABLE_NAME);
    LOG.debug(thirdSnapshot + " completed.");
    // Only the two "Table1*" snapshots match; "Table2Snapshot1" must survive.
    admin.deleteTableSnapshots("test.*", "Table1.*");
    assertEquals(1, admin.listTableSnapshots("test.*", ".*").size());
  } finally {
    if (admin != null) {
      try {
        admin.deleteTableSnapshots("test.*", ".*");
      } catch (SnapshotDoesNotExistException ignore) {
        // best-effort cleanup: snapshots may already be gone
      }
      admin.close();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies Admin#listTableSnapshots(tableRegex, snapshotRegex): with both
 * patterns wide open it must return all three snapshots taken across the two
 * "test*" tables. Snapshots and the helper table are cleaned up in finally.
 *
 * Fix: generic type arguments (stripped in this copy) restored on both lists.
 */
@Test(timeout = 300000)
public void testListTableSnapshots() throws Exception {
  Admin admin = null;
  TableName tableName2 = TableName.valueOf("testListTableSnapshots");
  try {
    admin = UTIL.getHBaseAdmin();
    HTableDescriptor htd = new HTableDescriptor(tableName2);
    UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
    String table1Snapshot1 = "Table1Snapshot1";
    admin.snapshot(table1Snapshot1, TABLE_NAME);
    LOG.debug("Snapshot1 completed.");
    String table1Snapshot2 = "Table1Snapshot2";
    admin.snapshot(table1Snapshot2, TABLE_NAME);
    LOG.debug("Snapshot2 completed.");
    String table2Snapshot1 = "Table2Snapshot1";
    admin.snapshot(Bytes.toBytes(table2Snapshot1), tableName2);
    LOG.debug(table2Snapshot1 + " completed.");
    List<SnapshotDescription> listTableSnapshots = admin.listTableSnapshots("test.*", ".*");
    List<String> listTableSnapshotNames = new ArrayList<>();
    assertEquals(3, listTableSnapshots.size());
    for (SnapshotDescription s : listTableSnapshots) {
      listTableSnapshotNames.add(s.getName());
    }
    assertTrue(listTableSnapshotNames.contains(table1Snapshot1));
    assertTrue(listTableSnapshotNames.contains(table1Snapshot2));
    assertTrue(listTableSnapshotNames.contains(table2Snapshot1));
  } finally {
    if (admin != null) {
      try {
        admin.deleteSnapshots("Table.*");
      } catch (SnapshotDoesNotExistException ignore) {
        // best-effort cleanup: snapshots may already be gone
      }
      if (admin.tableExists(tableName2)) {
        UTIL.deleteTable(tableName2);
      }
      admin.close();
    }
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Exercises Admin#deleteTableSnapshots(tableRegex, snapshotRegex): after
 * taking three snapshots across two matching tables, a wide-open delete must
 * leave zero snapshots behind. The helper table is dropped in finally.
 */
@Test(timeout = 300000)
public void testDeleteTableSnapshots() throws Exception {
  Admin admin = null;
  TableName secondTable = TableName.valueOf("testListTableSnapshots");
  try {
    admin = UTIL.getHBaseAdmin();
    HTableDescriptor descriptor = new HTableDescriptor(secondTable);
    UTIL.createTable(descriptor, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
    String firstSnapshot = "Table1Snapshot1";
    admin.snapshot(firstSnapshot, TABLE_NAME);
    LOG.debug("Snapshot1 completed.");
    String secondSnapshot = "Table1Snapshot2";
    admin.snapshot(secondSnapshot, TABLE_NAME);
    LOG.debug("Snapshot2 completed.");
    String thirdSnapshot = "Table2Snapshot1";
    admin.snapshot(Bytes.toBytes(thirdSnapshot), secondTable);
    LOG.debug(thirdSnapshot + " completed.");
    // Wide-open patterns: every snapshot on the "test*" tables must go.
    admin.deleteTableSnapshots("test.*", ".*");
    assertEquals(0, admin.listTableSnapshots("test.*", ".*").size());
  } finally {
    if (admin != null) {
      if (admin.tableExists(secondTable)) {
        UTIL.deleteTable(secondTable);
      }
      admin.close();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Admin#listTableSnapshots with a restrictive snapshot regex: only the two
 * "Table1*" snapshots may be listed; "Table2Snapshot1" must be filtered out.
 * Snapshots are cleaned up best-effort in finally.
 *
 * Fix: generic type arguments (stripped in this copy) restored on both lists.
 */
@Test(timeout = 300000)
public void testListTableSnapshotsWithRegex() throws Exception {
  Admin admin = null;
  try {
    admin = UTIL.getHBaseAdmin();
    String table1Snapshot1 = "Table1Snapshot1";
    admin.snapshot(table1Snapshot1, TABLE_NAME);
    LOG.debug("Snapshot1 completed.");
    String table1Snapshot2 = "Table1Snapshot2";
    admin.snapshot(table1Snapshot2, TABLE_NAME);
    LOG.debug("Snapshot2 completed.");
    String table2Snapshot1 = "Table2Snapshot1";
    admin.snapshot(Bytes.toBytes(table2Snapshot1), TABLE_NAME);
    LOG.debug(table2Snapshot1 + " completed.");
    List<SnapshotDescription> listTableSnapshots = admin.listTableSnapshots("test.*", "Table1.*");
    List<String> listTableSnapshotNames = new ArrayList<>();
    assertEquals(2, listTableSnapshots.size());
    for (SnapshotDescription s : listTableSnapshots) {
      listTableSnapshotNames.add(s.getName());
    }
    assertTrue(listTableSnapshotNames.contains(table1Snapshot1));
    assertTrue(listTableSnapshotNames.contains(table1Snapshot2));
    assertFalse(listTableSnapshotNames.contains(table2Snapshot1));
  } finally {
    if (admin != null) {
      try {
        admin.deleteSnapshots("Table.*");
      } catch (SnapshotDoesNotExistException ignore) {
        // best-effort cleanup: snapshots may already be gone
      }
      admin.close();
    }
  }
}

Class: org.apache.hadoop.hbase.client.TestSnapshotMetadata

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verify that the descriptor of a cloned table matches the descriptor of the
 * original (values, configuration, and customized toString), modulo the table
 * name substitution.
 *
 * Fixes: the two assertEquals calls on the custom value/config had their
 * (actual, expected) arguments swapped; the generic type argument (stripped
 * in this copy) restored on the families list.
 */
@Test(timeout = 300000)
public void testDescribeMatchesAfterClone() throws Exception {
  final String clonedTableNameAsString = "clone" + originalTableName;
  final TableName clonedTableName = TableName.valueOf(clonedTableNameAsString);
  final String snapshotNameAsString = "snapshot" + originalTableName + System.currentTimeMillis();
  final byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
  List<byte[]> familiesList = new ArrayList<>();
  Collections.addAll(familiesList, families);
  // Snapshot the original table, then clone the snapshot under a new name.
  SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, null, familiesList,
      snapshotNameAsString, rootDir, fs, false);
  admin.cloneSnapshot(snapshotName, clonedTableName);
  Table clonedTable = UTIL.getConnection().getTable(clonedTableName);
  HTableDescriptor cloneHtd = admin.getTableDescriptor(clonedTableName);
  // The customized description must match once the table name is substituted.
  assertEquals(
      originalTableDescription.replace(originalTableName.getNameAsString(), clonedTableNameAsString),
      cloneHtd.toStringCustomizedValues());
  assertEquals(originalTableDescriptor.getValues().size(), cloneHtd.getValues().size());
  assertEquals(originalTableDescriptor.getConfiguration().size(), cloneHtd.getConfiguration().size());
  assertEquals(TEST_CUSTOM_VALUE, cloneHtd.getValue(TEST_CUSTOM_VALUE));
  assertEquals(TEST_CONF_CUSTOM_VALUE, cloneHtd.getConfigurationValue(TEST_CONF_CUSTOM_VALUE));
  assertEquals(originalTableDescriptor.getValues(), cloneHtd.getValues());
  assertEquals(originalTableDescriptor.getConfiguration(), cloneHtd.getConfiguration());
  admin.enableTable(originalTableName);
  clonedTable.close();
}

Class: org.apache.hadoop.hbase.client.TestTimestampsFilter

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test from client side for TimestampsFilter.
 * The TimestampsFilter provides the ability to request cells (KeyValues)
 * whose timestamp/version is in the specified list of timestamps/versions.
 *
 * Phases: write versions 1-100 and 201-300, verify, flush to store files,
 * verify again, then write 101-200 and 301-400 so both store files and the
 * memstore hold matching cells. A Get with a mixed timestamp list must return
 * only the existing requested versions in descending timestamp order (505 is
 * never written, hence 4 of the 5 requested come back), an empty timestamp
 * list must return nothing, and a Scan with the filter must return exactly
 * three versions per column for every one of the five rows.
 * @throws Exception
 */
@Test public void testTimestampsFilter() throws Exception { byte[] TABLE=Bytes.toBytes("testTimestampsFilter"); byte[] FAMILY=Bytes.toBytes("event_log"); byte[][] FAMILIES=new byte[][]{FAMILY}; Cell kvs[]; Table ht=TEST_UTIL.createTable(TableName.valueOf(TABLE),FAMILIES,Integer.MAX_VALUE); for (int rowIdx=0; rowIdx < 5; rowIdx++) { for (int colIdx=0; colIdx < 5; colIdx++) { putNVersions(ht,FAMILY,rowIdx,colIdx,201,300); putNVersions(ht,FAMILY,rowIdx,colIdx,1,100); } } verifyInsertedValues(ht,FAMILY); TEST_UTIL.flush(); verifyInsertedValues(ht,FAMILY); for (int rowIdx=0; rowIdx < 5; rowIdx++) { for (int colIdx=0; colIdx < 5; colIdx++) { putNVersions(ht,FAMILY,rowIdx,colIdx,301,400); putNVersions(ht,FAMILY,rowIdx,colIdx,101,200); } } for (int rowIdx=0; rowIdx < 5; rowIdx++) { for (int colIdx=0; colIdx < 5; colIdx++) { kvs=getNVersions(ht,FAMILY,rowIdx,colIdx,Arrays.asList(505L,5L,105L,305L,205L)); assertEquals(4,kvs.length); checkOneCell(kvs[0],FAMILY,rowIdx,colIdx,305); checkOneCell(kvs[1],FAMILY,rowIdx,colIdx,205); checkOneCell(kvs[2],FAMILY,rowIdx,colIdx,105); checkOneCell(kvs[3],FAMILY,rowIdx,colIdx,5); } } kvs=getNVersions(ht,FAMILY,2,2,new ArrayList()); assertEquals(0,kvs == null ? 0 : kvs.length); Result[] results=scanNVersions(ht,FAMILY,0,4,Arrays.asList(6L,106L,306L)); assertEquals("# of rows returned from scan",5,results.length); for (int rowIdx=0; rowIdx < 5; rowIdx++) { kvs=results[rowIdx].rawCells(); assertEquals("Number of KeyValues in result for row:" + rowIdx,3 * 5,kvs.length); for (int colIdx=0; colIdx < 5; colIdx++) { int offset=colIdx * 3; checkOneCell(kvs[offset + 0],FAMILY,rowIdx,colIdx,306); checkOneCell(kvs[offset + 1],FAMILY,rowIdx,colIdx,106); checkOneCell(kvs[offset + 2],FAMILY,rowIdx,colIdx,6); } } ht.close(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * TimestampsFilter combined with explicit column selection on a Get: only the
 * version-3 cells of column2 and column4 may be returned, in column order.
 *
 * Fixes: assertEquals on the cell count had its (actual, expected) arguments
 * swapped; the deprecated {@code new Long(3)} replaced with the literal
 * {@code 3L}; generic type argument (stripped in this copy) restored on the
 * timestamp list.
 */
@Test
public void testMultiColumns() throws Exception {
  byte[] TABLE = Bytes.toBytes("testTimestampsFilterMultiColumns");
  byte[] FAMILY = Bytes.toBytes("event_log");
  byte[][] FAMILIES = new byte[][] { FAMILY };
  Table ht = TEST_UTIL.createTable(TableName.valueOf(TABLE), FAMILIES, Integer.MAX_VALUE);
  Put p = new Put(Bytes.toBytes("row"));
  p.addColumn(FAMILY, Bytes.toBytes("column0"), (long) 3, Bytes.toBytes("value0-3"));
  p.addColumn(FAMILY, Bytes.toBytes("column1"), (long) 3, Bytes.toBytes("value1-3"));
  p.addColumn(FAMILY, Bytes.toBytes("column2"), (long) 1, Bytes.toBytes("value2-1"));
  p.addColumn(FAMILY, Bytes.toBytes("column2"), (long) 2, Bytes.toBytes("value2-2"));
  p.addColumn(FAMILY, Bytes.toBytes("column2"), (long) 3, Bytes.toBytes("value2-3"));
  p.addColumn(FAMILY, Bytes.toBytes("column3"), (long) 2, Bytes.toBytes("value3-2"));
  p.addColumn(FAMILY, Bytes.toBytes("column4"), (long) 1, Bytes.toBytes("value4-1"));
  p.addColumn(FAMILY, Bytes.toBytes("column4"), (long) 2, Bytes.toBytes("value4-2"));
  p.addColumn(FAMILY, Bytes.toBytes("column4"), (long) 3, Bytes.toBytes("value4-3"));
  ht.put(p);
  // Filter to timestamp 3 only.
  ArrayList<Long> timestamps = new ArrayList<>();
  timestamps.add(3L);
  TimestampsFilter filter = new TimestampsFilter(timestamps);
  Get g = new Get(Bytes.toBytes("row"));
  g.setFilter(filter);
  g.setMaxVersions();
  g.addColumn(FAMILY, Bytes.toBytes("column2"));
  g.addColumn(FAMILY, Bytes.toBytes("column4"));
  Result result = ht.get(g);
  for (Cell kv : result.listCells()) {
    System.out.println("found row " + Bytes.toString(CellUtil.cloneRow(kv)) + ", column "
        + Bytes.toString(CellUtil.cloneQualifier(kv)) + ", value "
        + Bytes.toString(CellUtil.cloneValue(kv)));
  }
  assertEquals(2, result.listCells().size());
  assertTrue(CellUtil.matchingValue(result.listCells().get(0), Bytes.toBytes("value2-3")));
  assertTrue(CellUtil.matchingValue(result.listCells().get(1), Bytes.toBytes("value4-3")));
  ht.close();
}

Class: org.apache.hadoop.hbase.client.TestUpdateConfiguration

InternalCallVerifier EqualityVerifier 
/**
 * Online config change on the master: swap in hbase-site2.xml (which sets
 * hbase.custom.config), ask the master to reload its configuration, and
 * verify the live configuration picked the new value up. The original site
 * file is restored at the end via the cnf3 backup.
 *
 * Fix: assertEquals had its (actual, expected) arguments swapped.
 */
@Test
public void testMasterOnlineConfigChange() throws IOException {
  LOG.debug("Starting the test");
  Path cnfPath = FileSystems.getDefault().getPath("target/test-classes/hbase-site.xml");
  Path cnf2Path = FileSystems.getDefault().getPath("target/test-classes/hbase-site2.xml");
  Path cnf3Path = FileSystems.getDefault().getPath("target/test-classes/hbase-site3.xml");
  // Back up the original site file, then overwrite it with the variant that
  // carries the custom setting.
  Files.copy(cnfPath, cnf3Path, StandardCopyOption.REPLACE_EXISTING);
  Files.copy(cnf2Path, cnfPath, StandardCopyOption.REPLACE_EXISTING);
  Admin admin = TEST_UTIL.getHBaseAdmin();
  ServerName server = TEST_UTIL.getHBaseCluster().getMaster().getServerName();
  admin.updateConfiguration(server);
  Configuration conf = TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
  int custom = conf.getInt("hbase.custom.config", 0);
  assertEquals(1000, custom);
  // Restore the original site file so later tests see the default config.
  Files.copy(cnf3Path, cnfPath, StandardCopyOption.REPLACE_EXISTING);
}

Class: org.apache.hadoop.hbase.client.replication.TestReplicationAdmin

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Basic checks that a freshly added peer is enabled, that it can be disabled,
 * and that querying the state of an unknown peer fails.
 *
 * Fix: the expected-exception block for the unknown peer did not fail() when
 * no exception was thrown, so a regression in getPeerState's argument
 * validation would have passed silently.
 *
 * @throws Exception on any replication-admin failure
 */
@Test
public void testEnableDisable() throws Exception {
  admin.addPeer(ID_ONE, KEY_ONE);
  assertEquals(1, admin.getPeersCount());
  // A newly added peer starts enabled.
  assertTrue(admin.getPeerState(ID_ONE));
  admin.disablePeer(ID_ONE);
  assertFalse(admin.getPeerState(ID_ONE));
  try {
    admin.getPeerState(ID_SECOND);
    fail("getPeerState on an unknown peer should throw");
  } catch (IllegalArgumentException iae) {
    // expected: ID_SECOND was never added
  }
  admin.removePeer(ID_ONE);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Appending table-CFs to a peer: each appendPeerTableCFs call must preserve
 * the previously appended tables, join entries with ';' as the separator, and
 * keep an explicit cf qualifier ("t3:f1") intact in the stored config.
 * NOTE(review): the assertion messages spell "seperator"; they are runtime
 * strings and are deliberately left untouched here.
 */
@Test public void testAppendPeerTableCFs() throws Exception { admin.addPeer(ID_ONE,KEY_ONE); admin.appendPeerTableCFs(ID_ONE,"t1"); assertEquals("t1",admin.getPeerTableCFs(ID_ONE)); admin.appendPeerTableCFs(ID_ONE,"t2"); String peerTablesOne=admin.getPeerTableCFs(ID_ONE); assertTrue("Should contain t1",peerTablesOne.contains("t1")); assertTrue("Should contain t2",peerTablesOne.contains("t2")); assertTrue("Should contain ; as the seperator",peerTablesOne.contains(";")); admin.appendPeerTableCFs(ID_ONE,"t3:f1"); String peerTablesTwo=admin.getPeerTableCFs(ID_ONE); assertTrue("Should contain t1",peerTablesTwo.contains("t1")); assertTrue("Should contain t2",peerTablesTwo.contains("t2")); assertTrue("Should contain t3:f1",peerTablesTwo.contains("t3:f1")); assertTrue("Should contain ; as the seperator",peerTablesTwo.contains(";")); admin.removePeer(ID_ONE); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the peer configuration used by ReplicationAdmin contains all of
 * the peer's custom properties after the peer is added.
 *
 * Fix: the generic type argument (stripped in this copy) restored on the
 * peer list.
 */
@Test
public void testPeerConfig() throws Exception {
  ReplicationPeerConfig config = new ReplicationPeerConfig();
  config.setClusterKey(KEY_ONE);
  // Two arbitrary custom properties that must survive the round trip.
  config.getConfiguration().put("key1", "value1");
  config.getConfiguration().put("key2", "value2");
  admin.addPeer(ID_ONE, config, null);
  List<ReplicationPeer> peers = admin.listValidReplicationPeers();
  assertEquals(1, peers.size());
  ReplicationPeer peerOne = peers.get(0);
  assertNotNull(peerOne);
  assertEquals("value1", peerOne.getConfiguration().get("key1"));
  assertEquals("value2", peerOne.getConfiguration().get("key2"));
  admin.removePeer(ID_ONE);
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Adding a peer must be rejected while undeleted replication queues for that
 * peer id (or for a "peerId-serverName" variant) still exist in ZooKeeper.
 *
 * Fix: the ZooKeeperWatcher was only closed on the success path; it is now
 * closed in a finally block so a failed assertion cannot leak the ZK session.
 */
@Test
public void testAddPeerWithUnDeletedQueues() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test HBaseAdmin", null);
  try {
    ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(zkw, conf, null);
    repQueues.init("server1");
    // A queue node named exactly like the peer id blocks addPeer.
    repQueues.addLog(ID_ONE, "file1");
    try {
      admin.addPeer(ID_ONE, KEY_ONE);
      fail();
    } catch (ReplicationException e) {
      // expected: undeleted queue for ID_ONE
    }
    repQueues.removeQueue(ID_ONE);
    assertEquals(0, repQueues.getAllQueues().size());
    // A queue whose name merely starts with the peer id must also block it.
    repQueues.addLog(ID_ONE + "-server2", "file1");
    try {
      admin.addPeer(ID_ONE, KEY_ONE);
      fail();
    } catch (ReplicationException e) {
      // expected: undeleted queue for ID_ONE-server2
    }
    repQueues.removeAllQueues();
  } finally {
    zkw.close();
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Removing table-CF entries from a peer: removing an entry that is not
 * present (or whose cf spec does not match what is configured) must throw,
 * and removing the present entries shrinks the config until it is empty.
 *
 * Fix: {@code assertTrue(false)} replaced with {@code fail(...)}, the
 * idiomatic and self-describing way to flag a missing expected exception.
 */
@Test
public void testRemovePeerTableCFs() throws Exception {
  admin.addPeer(ID_ONE, KEY_ONE);
  try {
    admin.removePeerTableCFs(ID_ONE, "t3");
    fail("removing a table-CF that was never set should throw");
  } catch (ReplicationException e) {
    // expected: nothing configured yet
  }
  assertEquals("", admin.getPeerTableCFs(ID_ONE));
  admin.setPeerTableCFs(ID_ONE, "t1;t2:cf1");
  try {
    admin.removePeerTableCFs(ID_ONE, "t3");
    fail("removing an absent table should throw");
  } catch (ReplicationException e) {
    // expected: t3 not configured
  }
  assertEquals("t1;t2:cf1", admin.getPeerTableCFs(ID_ONE));
  try {
    admin.removePeerTableCFs(ID_ONE, "t1:f1");
    fail("removing with a mismatching cf spec should throw");
  } catch (ReplicationException e) {
    // expected: t1 is configured without an explicit cf
  }
  admin.removePeerTableCFs(ID_ONE, "t1");
  assertEquals("t2:cf1", admin.getPeerTableCFs(ID_ONE));
  try {
    admin.removePeerTableCFs(ID_ONE, "t2");
    fail("removing without the configured cf should throw");
  } catch (ReplicationException e) {
    // expected: t2 is configured with cf1
  }
  admin.removePeerTableCFs(ID_ONE, "t2:cf1");
  assertEquals("", admin.getPeerTableCFs(ID_ONE));
  admin.removePeer(ID_ONE);
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Simple testing of adding and removing peers; shows that all interactions
 * with ZK work: duplicate adds and removals of unknown peers are rejected,
 * and the peer count tracks every successful operation.
 *
 * Fix: the duplicate-add block did not fail() when no exception was thrown,
 * so a regression allowing duplicate peer ids would have passed silently.
 *
 * @throws Exception on any replication-admin failure
 */
@Test
public void testAddRemovePeer() throws Exception {
  admin.addPeer(ID_ONE, KEY_ONE);
  // Adding the same peer id again must be rejected.
  try {
    admin.addPeer(ID_ONE, KEY_ONE);
    fail("adding a duplicate peer id should throw");
  } catch (IllegalArgumentException iae) {
    // expected: ID_ONE already registered
  }
  assertEquals(1, admin.getPeersCount());
  // Removing a peer that was never added must fail.
  try {
    admin.removePeer(ID_SECOND);
    fail();
  } catch (IllegalArgumentException iae) {
    // expected: ID_SECOND unknown
  }
  assertEquals(1, admin.getPeersCount());
  // Adding a second, distinct peer must succeed.
  try {
    admin.addPeer(ID_SECOND, KEY_SECOND);
  } catch (IllegalStateException iae) {
    fail();
  }
  assertEquals(2, admin.getPeersCount());
  admin.removePeer(ID_ONE);
  assertEquals(1, admin.getPeersCount());
  admin.removePeer(ID_SECOND);
  assertEquals(0, admin.getPeersCount());
}

Class: org.apache.hadoop.hbase.client.replication.TestReplicationAdminWithClusters

InternalCallVerifier BooleanVerifier 
@Test(timeout=300000) public void testEnableReplicationForExplicitSetTableCfs() throws Exception { TableName tn=TableName.valueOf("testEnableReplicationForSetTableCfs"); String peerId="2"; if (admin2.isTableAvailable(tableName)) { admin2.disableTable(tableName); admin2.deleteTable(tableName); } assertFalse("Table should not exists in the peer cluster",admin2.isTableAvailable(tableName)); Map> tableCfs=new HashMap>(); tableCfs.put(tn,null); try { adminExt.setPeerTableCFs(peerId,tableCfs); adminExt.enableTableRep(tableName); assertFalse("Table should not be created if user has set table cfs explicitly for the " + "peer and this is not part of that collection",admin2.isTableAvailable(tableName)); tableCfs.put(tableName,null); adminExt.setPeerTableCFs(peerId,tableCfs); adminExt.enableTableRep(tableName); assertTrue("Table should be created if user has explicitly added table into table cfs collection",admin2.isTableAvailable(tableName)); } finally { adminExt.removePeerTableCFs(peerId,adminExt.getPeerTableCFs(peerId)); adminExt.disableTableRep(tableName); } }

InternalCallVerifier BooleanVerifier 
/**
 * Enabling replication for a table that is missing on the slave cluster must
 * create that table there.
 */
@Test(timeout = 300000)
public void testEnableReplicationWhenSlaveClusterDoesntHaveTable() throws Exception {
  // Drop the table on the peer so enableTableRep has to recreate it.
  admin2.disableTable(tableName);
  admin2.deleteTable(tableName);
  assertFalse(admin2.tableExists(tableName));
  adminExt.enableTableRep(tableName);
  // The replication admin must have pushed the schema to the peer cluster.
  assertTrue(admin2.tableExists(tableName));
}

Class: org.apache.hadoop.hbase.codec.TestCellCodec

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a single KeyValue through the CellCodec and checks that the
 * decoder yields exactly one cell and consumes exactly the number of bytes
 * the encoder produced.
 */
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  CountingOutputStream countingOut = new CountingOutputStream(sink);
  DataOutputStream out = new DataOutputStream(countingOut);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(out);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  kv.setSequenceId(Long.MAX_VALUE);
  encoder.write(kv);
  encoder.flush();
  out.close();
  long bytesWritten = countingOut.getCount();
  CountingInputStream countingIn =
      new CountingInputStream(new ByteArrayInputStream(sink.toByteArray()));
  DataInputStream in = new DataInputStream(countingIn);
  Codec.Decoder decoder = codec.getDecoder(in);
  // Exactly one cell was encoded: one successful advance, then exhaustion.
  assertTrue(decoder.advance());
  assertFalse(decoder.advance());
  in.close();
  // The decoder must have consumed every byte the encoder wrote.
  assertEquals(bytesWritten, countingIn.getCount());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Encodes three KeyValues with the CellCodec and verifies the decoder returns
 * them in write order, cell for cell, consuming exactly the encoded byte
 * count.
 */
@Test
public void testThree() throws IOException {
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  CountingOutputStream countingOut = new CountingOutputStream(sink);
  DataOutputStream out = new DataOutputStream(countingOut);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(out);
  final KeyValue[] written = new KeyValue[] {
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("1")),
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), Bytes.toBytes("2")),
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), Bytes.toBytes("3")) };
  for (KeyValue kv : written) {
    encoder.write(kv);
  }
  encoder.flush();
  out.close();
  long bytesWritten = countingOut.getCount();
  CountingInputStream countingIn =
      new CountingInputStream(new ByteArrayInputStream(sink.toByteArray()));
  DataInputStream in = new DataInputStream(countingIn);
  Codec.Decoder decoder = codec.getDecoder(in);
  // Cells must come back in exactly the order they were written.
  for (KeyValue expected : written) {
    assertTrue(decoder.advance());
    assertTrue(CellUtil.equals(decoder.current(), expected));
  }
  assertFalse(decoder.advance());
  in.close();
  assertEquals(bytesWritten, countingIn.getCount());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A CellCodec encoder that is flushed without writing any cell must emit zero
 * bytes, and the matching decoder must immediately report exhaustion.
 */
@Test
public void testEmptyWorks() throws IOException {
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  CountingOutputStream countingOut = new CountingOutputStream(sink);
  DataOutputStream out = new DataOutputStream(countingOut);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(out);
  encoder.flush();
  out.close();
  // Nothing was written, so nothing may have been emitted.
  assertEquals(0, countingOut.getCount());
  CountingInputStream countingIn =
      new CountingInputStream(new ByteArrayInputStream(sink.toByteArray()));
  DataInputStream in = new DataInputStream(countingIn);
  Codec.Decoder decoder = codec.getDecoder(in);
  assertFalse(decoder.advance());
  in.close();
  assertEquals(0, countingIn.getCount());
}

Class: org.apache.hadoop.hbase.codec.TestCellCodecWithTags

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips three cells carrying tags through CellCodecWithTags and checks
 * that every cell AND every tag (type + value, in order) survives, with the
 * decoder consuming exactly the encoded byte count: cell1 carries two tags,
 * cell2 one, cell3 three; after the third cell the decoder must be exhausted.
 */
@Test public void testCellWithTag() throws IOException { ByteArrayOutputStream baos=new ByteArrayOutputStream(); CountingOutputStream cos=new CountingOutputStream(baos); DataOutputStream dos=new DataOutputStream(cos); Codec codec=new CellCodecWithTags(); Codec.Encoder encoder=codec.getEncoder(dos); final Cell cell1=new KeyValue(Bytes.toBytes("r"),Bytes.toBytes("f"),Bytes.toBytes("1"),HConstants.LATEST_TIMESTAMP,Bytes.toBytes("1"),new Tag[]{new ArrayBackedTag((byte)1,Bytes.toBytes("teststring1")),new ArrayBackedTag((byte)2,Bytes.toBytes("teststring2"))}); final Cell cell2=new KeyValue(Bytes.toBytes("r"),Bytes.toBytes("f"),Bytes.toBytes("2"),HConstants.LATEST_TIMESTAMP,Bytes.toBytes("2"),new Tag[]{new ArrayBackedTag((byte)1,Bytes.toBytes("teststring3"))}); final Cell cell3=new KeyValue(Bytes.toBytes("r"),Bytes.toBytes("f"),Bytes.toBytes("3"),HConstants.LATEST_TIMESTAMP,Bytes.toBytes("3"),new Tag[]{new ArrayBackedTag((byte)2,Bytes.toBytes("teststring4")),new ArrayBackedTag((byte)2,Bytes.toBytes("teststring5")),new ArrayBackedTag((byte)1,Bytes.toBytes("teststring6"))}); encoder.write(cell1); encoder.write(cell2); encoder.write(cell3); encoder.flush(); dos.close(); long offset=cos.getCount(); CountingInputStream cis=new CountingInputStream(new ByteArrayInputStream(baos.toByteArray())); DataInputStream dis=new DataInputStream(cis); Codec.Decoder decoder=codec.getDecoder(dis); assertTrue(decoder.advance()); Cell c=decoder.current(); assertTrue(CellUtil.equals(c,cell1)); List tags=TagUtil.asList(c.getTagsArray(),c.getTagsOffset(),c.getTagsLength()); assertEquals(2,tags.size()); Tag tag=tags.get(0); assertEquals(1,tag.getType()); assertTrue(Bytes.equals(Bytes.toBytes("teststring1"),TagUtil.cloneValue(tag))); tag=tags.get(1); assertEquals(2,tag.getType()); assertTrue(Bytes.equals(Bytes.toBytes("teststring2"),TagUtil.cloneValue(tag))); assertTrue(decoder.advance()); c=decoder.current(); assertTrue(CellUtil.equals(c,cell2)); 
tags=TagUtil.asList(c.getTagsArray(),c.getTagsOffset(),c.getTagsLength()); assertEquals(1,tags.size()); tag=tags.get(0); assertEquals(1,tag.getType()); assertTrue(Bytes.equals(Bytes.toBytes("teststring3"),TagUtil.cloneValue(tag))); assertTrue(decoder.advance()); c=decoder.current(); assertTrue(CellUtil.equals(c,cell3)); tags=TagUtil.asList(c.getTagsArray(),c.getTagsOffset(),c.getTagsLength()); assertEquals(3,tags.size()); tag=tags.get(0); assertEquals(2,tag.getType()); assertTrue(Bytes.equals(Bytes.toBytes("teststring4"),TagUtil.cloneValue(tag))); tag=tags.get(1); assertEquals(2,tag.getType()); assertTrue(Bytes.equals(Bytes.toBytes("teststring5"),TagUtil.cloneValue(tag))); tag=tags.get(2); assertEquals(1,tag.getType()); assertTrue(Bytes.equals(Bytes.toBytes("teststring6"),TagUtil.cloneValue(tag))); assertFalse(decoder.advance()); dis.close(); assertEquals(offset,cis.getCount()); }

Class: org.apache.hadoop.hbase.codec.TestCellMessageCodec

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** An encoder flushed without writing any cell must emit zero bytes, and the matching
 *  decoder must immediately report exhaustion. */
@Test
public void testEmptyWorks() throws IOException {
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  CountingOutputStream countingOut = new CountingOutputStream(sink);
  DataOutputStream out = new DataOutputStream(countingOut);
  MessageCodec codec = new MessageCodec();
  Codec.Encoder encoder = codec.getEncoder(out);
  encoder.flush();
  out.close();
  long written = countingOut.getCount();
  assertEquals(0, written);
  CountingInputStream countingIn =
      new CountingInputStream(new ByteArrayInputStream(sink.toByteArray()));
  DataInputStream in = new DataInputStream(countingIn);
  Codec.Decoder decoder = codec.getDecoder(in);
  assertFalse(decoder.advance());
  in.close();
  assertEquals(0, countingIn.getCount());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Encodes three KeyValues via MessageCodec, then decodes them back and checks order,
 *  equality, stream exhaustion, and that all written bytes were consumed. */
@Test
public void testThree() throws IOException {
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  CountingOutputStream countingOut = new CountingOutputStream(sink);
  DataOutputStream out = new DataOutputStream(countingOut);
  MessageCodec codec = new MessageCodec();
  Codec.Encoder encoder = codec.getEncoder(out);
  final KeyValue kv1 =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("1"));
  final KeyValue kv2 =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), Bytes.toBytes("2"));
  final KeyValue kv3 =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), Bytes.toBytes("3"));
  encoder.write(kv1);
  encoder.write(kv2);
  encoder.write(kv3);
  encoder.flush();
  out.close();
  long written = countingOut.getCount();
  CountingInputStream countingIn =
      new CountingInputStream(new ByteArrayInputStream(sink.toByteArray()));
  DataInputStream in = new DataInputStream(countingIn);
  Codec.Decoder decoder = codec.getDecoder(in);
  for (KeyValue expected : new KeyValue[] { kv1, kv2, kv3 }) {
    assertTrue(decoder.advance());
    assertTrue(CellUtil.equals(decoder.current(), expected));
  }
  assertFalse(decoder.advance());
  in.close();
  assertEquals(written, countingIn.getCount());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Encodes a single KeyValue with MessageCodec; decoding must yield exactly one cell
 *  and consume all written bytes. */
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  CountingOutputStream countingOut = new CountingOutputStream(sink);
  DataOutputStream out = new DataOutputStream(countingOut);
  MessageCodec codec = new MessageCodec();
  Codec.Encoder encoder = codec.getEncoder(out);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  encoder.write(kv);
  encoder.flush();
  out.close();
  long written = countingOut.getCount();
  CountingInputStream countingIn =
      new CountingInputStream(new ByteArrayInputStream(sink.toByteArray()));
  DataInputStream in = new DataInputStream(countingIn);
  Codec.Decoder decoder = codec.getDecoder(in);
  assertTrue(decoder.advance());  // exactly one cell present
  assertFalse(decoder.advance()); // ...and no more
  in.close();
  assertEquals(written, countingIn.getCount());
}

Class: org.apache.hadoop.hbase.codec.TestKeyValueCodec

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A single KeyValue written through KeyValueCodec must occupy exactly its serialized
 *  length plus Bytes.SIZEOF_INT of framing, on both the write and read side. */
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  CountingOutputStream countingOut = new CountingOutputStream(sink);
  DataOutputStream out = new DataOutputStream(countingOut);
  KeyValueCodec codec = new KeyValueCodec();
  Codec.Encoder encoder = codec.getEncoder(out);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  final long expectedLength = kv.getLength() + Bytes.SIZEOF_INT;
  encoder.write(kv);
  encoder.flush();
  out.close();
  assertEquals(expectedLength, countingOut.getCount());
  CountingInputStream countingIn =
      new CountingInputStream(new ByteArrayInputStream(sink.toByteArray()));
  DataInputStream in = new DataInputStream(countingIn);
  Codec.Decoder decoder = codec.getDecoder(in);
  assertTrue(decoder.advance());
  assertFalse(decoder.advance());
  in.close();
  assertEquals(expectedLength, countingIn.getCount());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Three KeyValues through KeyValueCodec: total bytes must be three times the per-cell
 *  framed length, and decoding must return equal KeyValues in write order. */
@Test
public void testThree() throws IOException {
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  CountingOutputStream countingOut = new CountingOutputStream(sink);
  DataOutputStream out = new DataOutputStream(countingOut);
  KeyValueCodec codec = new KeyValueCodec();
  Codec.Encoder encoder = codec.getEncoder(out);
  final KeyValue kv1 =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("1"));
  final KeyValue kv2 =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), Bytes.toBytes("2"));
  final KeyValue kv3 =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), Bytes.toBytes("3"));
  // all three cells have identical structure, so kv1's length stands in for each
  final long framedLength = kv1.getLength() + Bytes.SIZEOF_INT;
  encoder.write(kv1);
  encoder.write(kv2);
  encoder.write(kv3);
  encoder.flush();
  out.close();
  assertEquals(framedLength * 3, countingOut.getCount());
  CountingInputStream countingIn =
      new CountingInputStream(new ByteArrayInputStream(sink.toByteArray()));
  DataInputStream in = new DataInputStream(countingIn);
  Codec.Decoder decoder = codec.getDecoder(in);
  for (KeyValue expected : new KeyValue[] { kv1, kv2, kv3 }) {
    assertTrue(decoder.advance());
    assertTrue(expected.equals((KeyValue) decoder.current()));
  }
  assertFalse(decoder.advance());
  in.close();
  assertEquals((framedLength * 3), countingIn.getCount());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Flushing a KeyValueCodec encoder with no cells must write zero bytes, and the
 *  matching decoder must immediately report exhaustion. */
@Test
public void testEmptyWorks() throws IOException {
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  CountingOutputStream countingOut = new CountingOutputStream(sink);
  DataOutputStream out = new DataOutputStream(countingOut);
  KeyValueCodec codec = new KeyValueCodec();
  Codec.Encoder encoder = codec.getEncoder(out);
  encoder.flush();
  out.close();
  long written = countingOut.getCount();
  assertEquals(0, written);
  CountingInputStream countingIn =
      new CountingInputStream(new ByteArrayInputStream(sink.toByteArray()));
  DataInputStream in = new DataInputStream(countingIn);
  Codec.Decoder decoder = codec.getDecoder(in);
  assertFalse(decoder.advance());
  in.close();
  assertEquals(0, countingIn.getCount());
}

Class: org.apache.hadoop.hbase.codec.TestKeyValueCodecWithTags

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips three tagged KeyValues through KeyValueCodecWithTags and verifies that
 * cell contents and every attached tag (type and value) survive encode/decode, and that
 * the decoder consumes exactly as many bytes as the encoder produced.
 */
@Test
public void testKeyValueWithTag() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  Codec codec = new KeyValueCodecWithTags();
  Codec.Encoder encoder = codec.getEncoder(dos);
  final KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("1"),
      new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("teststring1")),
          new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring2")) });
  final KeyValue kv2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("2"),
      new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("teststring3")) });
  final KeyValue kv3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("3"),
      new Tag[] { new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring4")),
          new ArrayBackedTag((byte) 2, Bytes.toBytes("teststring5")),
          new ArrayBackedTag((byte) 1, Bytes.toBytes("teststring6")) });
  encoder.write(kv1);
  encoder.write(kv2);
  encoder.write(kv3);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = codec.getDecoder(dis);
  assertTrue(decoder.advance());
  Cell c = decoder.current();
  assertTrue(CellUtil.equals(c, kv1));
  // BUGFIX: was a raw List, which made tags.get(i) return Object and fail to compile
  // when assigned to Tag; restored the List<Tag> parameterization.
  List<Tag> tags = TagUtil.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
  assertEquals(2, tags.size());
  Tag tag = tags.get(0);
  assertEquals(1, tag.getType());
  assertTrue(Bytes.equals(Bytes.toBytes("teststring1"), TagUtil.cloneValue(tag)));
  tag = tags.get(1);
  assertEquals(2, tag.getType());
  assertTrue(Bytes.equals(Bytes.toBytes("teststring2"), TagUtil.cloneValue(tag)));
  assertTrue(decoder.advance());
  c = decoder.current();
  assertTrue(CellUtil.equals(c, kv2));
  tags = TagUtil.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
  assertEquals(1, tags.size());
  tag = tags.get(0);
  assertEquals(1, tag.getType());
  assertTrue(Bytes.equals(Bytes.toBytes("teststring3"), TagUtil.cloneValue(tag)));
  assertTrue(decoder.advance());
  c = decoder.current();
  assertTrue(CellUtil.equals(c, kv3));
  tags = TagUtil.asList(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength());
  assertEquals(3, tags.size());
  tag = tags.get(0);
  assertEquals(2, tag.getType());
  assertTrue(Bytes.equals(Bytes.toBytes("teststring4"), TagUtil.cloneValue(tag)));
  tag = tags.get(1);
  assertEquals(2, tag.getType());
  assertTrue(Bytes.equals(Bytes.toBytes("teststring5"), TagUtil.cloneValue(tag)));
  tag = tags.get(2);
  assertEquals(1, tag.getType());
  assertTrue(Bytes.equals(Bytes.toBytes("teststring6"), TagUtil.cloneValue(tag)));
  assertFalse(decoder.advance());
  dis.close();
  // decoder must have consumed every byte the encoder wrote
  assertEquals(offset, cis.getCount());
}

Class: org.apache.hadoop.hbase.codec.keyvalue.TestKeyValueTool

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips the fixture's input KeyValues to a ByteBuffer and back, asserting the
 * decoded list equals the inputs element-for-element. Tagged fixtures are skipped —
 * presumably because this serialization path does not carry tags; confirm upstream.
 * NOTE(review): the Lists here are raw types; upstream likely uses List&lt;KeyValue&gt; — confirm.
 */
@Test public void testRoundTripToBytes(){ if (rows instanceof TestRowDataTrivialWithTags || rows instanceof TestRowDataRandomKeyValuesWithTags) { return; } List kvs=rows.getInputs(); ByteBuffer bb=KeyValueTestUtil.toByteBufferAndRewind(kvs,false); List roundTrippedKvs=KeyValueTestUtil.rewindThenToList(bb,false,false); Assert.assertArrayEquals(kvs.toArray(),roundTrippedKvs.toArray()); }

Class: org.apache.hadoop.hbase.codec.prefixtree.blockmeta.TestBlockMeta

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** Serializes a sample PrefixTreeBlockMeta to a byte stream, deserializes it from the
 *  resulting buffer, and checks the round trip preserves equality. */
@Test
public void testStreamSerialization() throws IOException {
  PrefixTreeBlockMeta sample = createSample();
  ByteArrayOutputStream out = new ByteArrayOutputStream(10000);
  sample.writeVariableBytesToOutputStream(out);
  ByteBuffer wrapped = ByteBuffer.wrap(out.toByteArray());
  PrefixTreeBlockMeta reconstructed = new PrefixTreeBlockMeta(new SingleByteBuff(wrapped));
  Assert.assertTrue(sample.equals(reconstructed));
}

Class: org.apache.hadoop.hbase.codec.prefixtree.builder.TestTokenizer

InternalCallVerifier BooleanVerifier 
/** For every input row key, searches the tokenizer trie and checks the matching node
 *  reconstructs exactly the bytes that were inserted. */
@Test
public void testSearching() {
  for (byte[] key : inputs) {
    TokenizerRowSearchResult holder = new TokenizerRowSearchResult();
    builder.getNode(holder, key, 0, key.length);
    TokenizerNode match = holder.getMatchingNode();
    byte[] reconstructed = match.getNewByteArray();
    Assert.assertTrue(Bytes.equals(key, reconstructed));
  }
}

Class: org.apache.hadoop.hbase.codec.prefixtree.column.TestColumnBuilder

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips sorted unique column qualifiers through the ColumnSectionWriter /
 * ColumnReader pair: first verifies the builder reproduces each input byte-for-byte,
 * then compiles the column section to bytes, reads it back, and checks every occurring
 * tokenizer node yields the original qualifier at its recorded output offset.
 */
@Test
public void testReaderRoundTrip() throws IOException {
  for (int i = 0; i < sortedUniqueColumns.size(); ++i) {
    ByteRange column = sortedUniqueColumns.get(i);
    builder.addSorted(column);
  }
  // BUGFIX: these two lists were raw types, which breaks the byte[] / TokenizerNode
  // assignments below at compile time; restored their element types.
  List<byte[]> builderOutputArrays = builder.getArrays();
  for (int i = 0; i < builderOutputArrays.size(); ++i) {
    byte[] inputArray = sortedUniqueColumns.get(i).deepCopyToNewArray();
    byte[] outputArray = builderOutputArrays.get(i);
    boolean same = Bytes.equals(inputArray, outputArray);
    Assert.assertTrue(same);
  }
  Assert.assertEquals(sortedUniqueColumns.size(), builderOutputArrays.size());
  writer = new ColumnSectionWriter(blockMeta, builder, ColumnNodeType.QUALIFIER);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  writer.compile().writeBytes(baos);
  bytes = baos.toByteArray();
  buffer = new byte[blockMeta.getMaxQualifierLength()];
  reader = new ColumnReader(buffer, ColumnNodeType.QUALIFIER);
  reader.initOnBlock(blockMeta, new SingleByteBuff(ByteBuffer.wrap(bytes)));
  List<TokenizerNode> builderNodes = Lists.newArrayList();
  builder.appendNodes(builderNodes, true, true);
  int i = 0;
  for (TokenizerNode builderNode : builderNodes) {
    if (!builderNode.hasOccurrences()) {
      continue; // node has no occurrences, so it maps to no input qualifier
    }
    // every occurring node should correspond to exactly one input qualifier
    Assert.assertEquals(1, builderNode.getNumOccurrences());
    int position = builderNode.getOutputArrayOffset();
    byte[] output = reader.populateBuffer(position).copyBufferToNewArray();
    boolean same = Bytes.equals(sortedUniqueColumns.get(i).deepCopyToNewArray(), output);
    Assert.assertTrue(same);
    ++i;
  }
}

Class: org.apache.hadoop.hbase.codec.prefixtree.row.TestPrefixTreeSearcher

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** Positions the searcher at-or-after a key built from the first half of an input row;
 *  the result must be AFTER, landing on the full input cell. */
@Test
public void testSeekWithPrefix() throws IOException {
  if (!(rows instanceof TestRowDataSearchWithPrefix)) {
    return; // only meaningful for the prefix-search fixture
  }
  CellSearcher searcher = null;
  try {
    searcher = DecoderFactory.checkOut(block, true);
    KeyValue input = rows.getInputs().get(1);
    byte[] halfRow = Arrays.copyOfRange(input.getRowArray(), input.getRowOffset(),
        input.getRowOffset() + input.getRowLength() / 2);
    KeyValue firstOnHalfRow = KeyValueUtil.createFirstOnRow(halfRow);
    CellScannerPosition position = searcher.positionAtOrAfter(firstOnHalfRow);
    Assert.assertEquals(CellScannerPosition.AFTER, position);
    Assert.assertEquals(input, searcher.current());
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** Every input cell must be found by an exact-position seek, and the cell the searcher
 *  reports must equal the one sought. */
@Test
public void testRandomSeekHits() throws IOException {
  CellSearcher searcher = null;
  try {
    searcher = DecoderFactory.checkOut(block, true);
    for (KeyValue input : rows.getInputs()) {
      boolean located = searcher.positionAt(input);
      Assert.assertTrue(located);
      Cell found = searcher.current();
      Assert.assertTrue(CellUtil.equals(input, found));
    }
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Seeks to keys known to be absent and verifies position semantics. For the
 * first-on-next-row key of each input: positionAtOrBefore must land BEFORE it and
 * positionAtOrAfter must land AFTER it (checked against the expected neighboring input)
 * whenever a following row exists. For the key immediately before each input:
 * positionAt must miss, and positionAtOrAfter must land on that input — or report
 * AFTER_LAST when the input was the final cell.
 */
@Test
public void testRandomSeekMisses() throws IOException {
  CellSearcher searcher = null;
  // BUGFIX: was a raw List; the arithmetic on rowStartIndexes.get(...) below requires
  // the Integer element type to compile.
  List<Integer> rowStartIndexes = rows.getRowStartIndexes();
  try {
    searcher = DecoderFactory.checkOut(block, true);
    // exercise both directional variants of the or-miss positioning API
    for (boolean beforeVsAfterOnMiss : new boolean[] { true, false }) {
      for (int i = 0; i < rows.getInputs().size(); ++i) {
        KeyValue kv = rows.getInputs().get(i);
        Cell inputNextRow = CellUtil.createFirstOnNextRow(kv);
        CellScannerPosition position = beforeVsAfterOnMiss
            ? searcher.positionAtOrBefore(inputNextRow)
            : searcher.positionAtOrAfter(inputNextRow);
        boolean isFirstInRow = rowStartIndexes.contains(i);
        if (isFirstInRow) {
          int rowIndex = rowStartIndexes.indexOf(i);
          if (rowIndex < rowStartIndexes.size() - 1) {
            if (beforeVsAfterOnMiss) {
              Assert.assertEquals(CellScannerPosition.BEFORE, position);
            } else {
              Assert.assertEquals(CellScannerPosition.AFTER, position);
            }
            // BEFORE lands on the last cell of this row; AFTER on the next row's first
            int expectedInputIndex = beforeVsAfterOnMiss
                ? rowStartIndexes.get(rowIndex + 1) - 1
                : rowStartIndexes.get(rowIndex + 1);
            Assert.assertEquals(rows.getInputs().get(expectedInputIndex), searcher.current());
          }
        }
        KeyValue inputPreviousKv = KeyValueUtil.previousKey(kv);
        boolean hit = searcher.positionAt(inputPreviousKv);
        Assert.assertFalse(hit);
        position = searcher.positionAtOrAfter(inputPreviousKv);
        if (CollectionUtils.isLastIndex(rows.getInputs(), i)) {
          Assert.assertTrue(CellScannerPosition.AFTER_LAST == position);
        } else {
          Assert.assertTrue(CellScannerPosition.AFTER == position);
          Assert.assertEquals(rows.getInputs().get(i + 1), searcher.current());
        }
      }
    }
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** Walks the searcher backwards from past-the-end; cells must equal the inputs in exact
 *  reverse order, and every input must be visited. */
@Test
public void testScanBackwards() throws IOException {
  CellSearcher searcher = null;
  try {
    searcher = DecoderFactory.checkOut(block, true);
    searcher.positionAfterLastCell();
    int seen = 0;
    while (searcher.previous()) {
      int reverseIndex = rows.getInputs().size() - seen - 1;
      KeyValue expected = rows.getInputs().get(reverseIndex);
      KeyValue actual = KeyValueUtil.copyToNewKeyValue(searcher.current());
      Assert.assertEquals(expected, actual);
      ++seen;
    }
    Assert.assertEquals(rows.getInputs().size(), seen);
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Advances the searcher across the whole block, checking each returned cell equals the
 *  corresponding input (equality asserted in both directions plus CellUtil.equals). */
@Test
public void testScanForwards() throws IOException {
  CellSearcher searcher = null;
  try {
    searcher = DecoderFactory.checkOut(block, true);
    int seen = 0;
    while (searcher.advance()) {
      KeyValue expected = rows.getInputs().get(seen);
      Cell actual = searcher.current();
      Assert.assertEquals(expected, actual);
      Assert.assertEquals(actual, expected);
      Assert.assertTrue(CellUtil.equals(expected, actual));
      ++seen;
    }
    Assert.assertEquals(rows.getInputs().size(), seen);
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}

Class: org.apache.hadoop.hbase.codec.prefixtree.row.TestRowEncoder

InternalCallVerifier EqualityVerifier 
/** Scans forward through every cell, verifying key and value both round-trip and that
 *  the whole input list is covered. */
@Test
public void testForwardScanner() {
  int seen = 0;
  while (searcher.advance()) {
    KeyValue expected = rows.getInputs().get(seen);
    KeyValue actual = KeyValueUtil.copyToNewKeyValue(searcher.current());
    assertKeyAndValueEqual(expected, actual);
    ++seen;
  }
  // every input must have been visited exactly once
  Assert.assertEquals(rows.getInputs().size(), seen);
}

InternalCallVerifier EqualityVerifier 
/**
 * Reverse scan with "jitter": after each successful previous(), when not positioned
 * after the last cell, steps forward once and back again before comparing — per the
 * original note this exercises the decoder's nubCellsRemain state, which acts as a
 * special fan index. Cells must still come back in exact reverse input order, and
 * every input must be visited.
 */
@Test public void testReverseScannerWithJitter(){ searcher.positionAfterLastCell(); int counter=-1; while (true) { boolean foundCell=searcher.previous(); if (!foundCell) { break; } ++counter; if (!searcher.isAfterLast()) { searcher.advance(); searcher.previous(); } int oppositeIndex=rows.getInputs().size() - counter - 1; KeyValue inputKv=rows.getInputs().get(oppositeIndex); KeyValue outputKv=KeyValueUtil.copyToNewKeyValue(searcher.current()); assertKeyAndValueEqual(inputKv,outputKv); } Assert.assertEquals(rows.getInputs().size(),counter + 1); }

InternalCallVerifier EqualityVerifier 
/** Plain reverse scan from past-the-end (see testReverseScannerWithJitter for the more
 *  thorough variant): cells must come back in exact reverse input order. */
@Test
public void testReverseScanner() {
  searcher.positionAfterLastCell();
  int seen = 0;
  while (searcher.previous()) {
    int reverseIndex = rows.getInputs().size() - seen - 1;
    KeyValue expected = rows.getInputs().get(reverseIndex);
    KeyValue actual = KeyValueUtil.copyToNewKeyValue(searcher.current());
    assertKeyAndValueEqual(expected, actual);
    ++seen;
  }
  Assert.assertEquals(rows.getInputs().size(), seen);
}

Class: org.apache.hadoop.hbase.codec.prefixtree.timestamp.TestTimestampEncoder

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** Each timestamp the decoder returns must equal the expected output at that index. */
@Test
public void testReaderRoundTrip() {
  for (int index = 0; index < timestamps.getOutputs().size(); ++index) {
    long expected = timestamps.getOutputs().get(index);
    long actual = decoder.getLong(index);
    Assert.assertEquals(expected, actual);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** The encoder's sorted-unique timestamp array must mirror the expected outputs
 *  index-for-index. */
@Test
public void testCompressorRoundTrip() {
  long[] sortedUnique = encoder.getSortedUniqueTimestamps();
  for (int index = 0; index < timestamps.getOutputs().size(); ++index) {
    long expected = timestamps.getOutputs().get(index);
    long actual = sortedUnique[index];
    Assert.assertEquals(expected, actual);
  }
}

Class: org.apache.hadoop.hbase.conf.TestConfigurationManager

InternalCallVerifier BooleanVerifier 
/**
 * Registered observers must be notified on every configuration reload, and a
 * deregistered observer must stop receiving notifications while others still do.
 */
@Test
public void testCheckIfObserversNotified() {
  Configuration conf = new Configuration();
  ConfigurationManager cm = new ConfigurationManager();
  DummyConfigurationObserver d1 = new DummyConfigurationObserver(cm);
  // a single registered observer is notified
  cm.notifyAllObservers(conf);
  assertTrue(d1.wasNotifiedOnChange());
  d1.resetNotifiedOnChange();
  // after a second observer registers, both are notified
  DummyConfigurationObserver d2 = new DummyConfigurationObserver(cm);
  cm.notifyAllObservers(conf);
  assertTrue(d1.wasNotifiedOnChange());
  d1.resetNotifiedOnChange();
  assertTrue(d2.wasNotifiedOnChange());
  d2.resetNotifiedOnChange();
  // once d2 deregisters, only d1 keeps receiving notifications
  d2.deregister();
  cm.notifyAllObservers(conf);
  assertTrue(d1.wasNotifiedOnChange());
  d1.resetNotifiedOnChange();
  assertFalse(d2.wasNotifiedOnChange());
}

Class: org.apache.hadoop.hbase.constraint.TestConstraint

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A table carrying AllFailConstraint must reject any put: the put should fail with a
 * RetriesExhaustedWithDetailsException whose single cause is a ConstraintException.
 * @throws Exception on test infrastructure failure
 */
@SuppressWarnings("unchecked")
@Test(timeout=60000)
public void testConstraintFails() throws Exception {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  for (byte[] family : new byte[][] { dummy, test }) {
    desc.addFamily(new HColumnDescriptor(family));
  }
  Constraints.add(desc, AllFailConstraint.class);
  util.getHBaseAdmin().createTable(desc);
  Table table = util.getConnection().getTable(tableName);
  Put put = new Put(row1);
  byte[] qualifier = new byte[0];
  put.addColumn(dummy, qualifier, "fail".getBytes());
  LOG.warn("Doing put in table");
  try {
    table.put(put);
    fail("This put should not have suceeded - AllFailConstraint was not run!");
  } catch (RetriesExhaustedWithDetailsException e) {
    // BUGFIX: was a raw List, so causes.get(0) returned Object and the assignment to
    // Throwable below would not compile; restored List<Throwable>.
    List<Throwable> causes = e.getCauses();
    assertEquals("More than one failure cause - should only be the failure constraint exception",
        1, causes.size());
    Throwable t = causes.get(0);
    assertEquals(ConstraintException.class, t.getClass());
  }
  table.close();
}

Class: org.apache.hadoop.hbase.constraint.TestConstraints

BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Configuration passed when adding a constraint must be preserved: AlsoWorks was added
 * with _ENABLED=false and _PRIORITY=10 and must read those back, while the constraint
 * added without an explicit config must read back priority 2.
 */
@Test
public void testConfigurationPreserved() throws Throwable {
  Configuration conf = new Configuration();
  conf.setBoolean("_ENABLED", false);
  conf.setLong("_PRIORITY", 10);
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table"));
  Constraints.add(desc, AlsoWorks.class, conf);
  Constraints.add(desc, WorksConstraint.class);
  assertFalse(Constraints.enabled(desc, AlsoWorks.class));
  // BUGFIX: was a raw List, which breaks the typed for-each below at compile time;
  // a wildcard-bounded element type keeps each element usable as a Constraint.
  List<? extends Constraint> constraints =
      Constraints.getConstraints(desc, this.getClass().getClassLoader());
  for (Constraint c : constraints) {
    Configuration storedConf = c.getConf();
    if (c instanceof AlsoWorks) {
      assertEquals(10, storedConf.getLong("_PRIORITY", -1));
    } else {
      // the other registered constraint must see priority 2
      assertEquals(2, storedConf.getLong("_PRIORITY", -1));
    }
  }
}

InternalCallVerifier BooleanVerifier 
/**
 * Lifecycle checks: adding a constraint enables it and installs the ConstraintProcessor
 * coprocessor; disabling removes the coprocessor while the constraint stays enabled;
 * removal (single or wholesale) drops the constraint entirely.
 * @throws Exception on failure
 */
@SuppressWarnings("unchecked")
@Test
public void testEnableDisableRemove() throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table"));
  Constraints.add(desc, AllPassConstraint.class);
  assertTrue(Constraints.enabled(desc, AllPassConstraint.class));
  assertTrue(desc.hasCoprocessor(ConstraintProcessor.class.getName()));
  // disabling unloads the processor but leaves the constraint enabled
  Constraints.disable(desc);
  assertFalse(desc.hasCoprocessor(ConstraintProcessor.class.getName()));
  assertTrue(Constraints.enabled(desc, AllPassConstraint.class));
  // targeted removal of a single constraint
  Constraints.remove(desc, AllPassConstraint.class);
  assertFalse(Constraints.has(desc, AllPassConstraint.class));
  // wholesale removal clears both processor and constraint
  Constraints.add(desc, AllPassConstraint.class);
  Constraints.remove(desc);
  assertFalse(desc.hasCoprocessor(ConstraintProcessor.class.getName()));
  assertFalse(Constraints.has(desc, AllPassConstraint.class));
}

Class: org.apache.hadoop.hbase.coprocessor.TestAggregateProtocol

InternalCallVerifier EqualityVerifier 
/** avg over rows [5,15) of TEST_FAMILY:TEST_QUALIFIER must be 9.5. */
@Test(timeout=300000)
public void testAvgWithValidRange2() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  scan.setStartRow(ROWS[5]);
  scan.setStopRow(ROWS[15]);
  // NOTE(review): raw ColumnInterpreter kept; generics appear stripped — confirm upstream.
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  double average = client.avg(TEST_TABLE, interpreter, scan);
  assertEquals(9.5, average, 0);
}

InternalCallVerifier EqualityVerifier 
/** max over rows [5,15) of TEST_FAMILY:TEST_QUALIFIER must be 14. */
@Test(timeout=300000)
public void testMaxWithValidRange2() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  scan.setStartRow(ROWS[5]);
  scan.setStopRow(ROWS[15]);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  long max = client.max(TEST_TABLE, interpreter, scan);
  assertEquals(14, max);
}

InternalCallVerifier EqualityVerifier 
/** A prefix filter that matches nothing must make min come back null. */
@Test(timeout=300000)
public void testMinWithFilter() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  Filter filter = new PrefixFilter(Bytes.toBytes("foo:bar"));
  scan.setFilter(filter);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Long min = null;
  min = client.min(TEST_TABLE, interpreter, scan);
  assertEquals(null, min);
}

InternalCallVerifier EqualityVerifier 
/** min over rows [6,7) of the whole TEST_FAMILY (no qualifier) must be 6. */
@Test(timeout=300000)
public void testMinWithValidRange2WithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[7]);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  long min = client.min(TEST_TABLE, interpreter, scan);
  assertEquals(6, min);
}

InternalCallVerifier EqualityVerifier 
/** avg over rows [6,7) of the whole TEST_FAMILY (no qualifier) must be 66 (= 6 + 60). */
@Test(timeout=300000)
public void testAvgWithValidRange2WithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[7]);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  double average = client.avg(TEST_TABLE, interpreter, scan);
  assertEquals(6 + 60, average, 0);
}

InternalCallVerifier EqualityVerifier 
/** median over the full table for TEST_FAMILY:TEST_QUALIFIER must be 8. */
@Test(timeout=300000)
public void testMedianWithValidRange() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  long median = client.median(TEST_TABLE, interpreter, scan);
  assertEquals(8L, median);
}

InternalCallVerifier EqualityVerifier 
/** sum over the full table for TEST_FAMILY:TEST_QUALIFIER must be 190. */
@Test(timeout=300000)
public void testSumWithValidRange() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  long sum = client.sum(TEST_TABLE, interpreter, scan);
  assertEquals(190, sum);
}

InternalCallVerifier EqualityVerifier 
/** min over an inverted range (start after stop) must produce no result. */
@Test(timeout=300000)
public void testMinWithInvalidRange() {
  AggregationClient client = new AggregationClient(conf);
  Long min = null;
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[4]);
  scan.setStopRow(ROWS[2]);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  try {
    min = client.min(TEST_TABLE, interpreter, scan);
  } catch (Throwable ignored) {
    // the invalid range is expected to fail; min stays null
  }
  assertEquals(null, min);
}

InternalCallVerifier EqualityVerifier 
/** std with no column family on the scan must produce no result. */
@Test(timeout=300000)
public void testStdWithValidRangeWithNullCF() {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[17]);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Double std = null;
  try {
    std = client.std(TEST_TABLE, interpreter, scan);
  } catch (Throwable ignored) {
    // the missing column family is expected to fail; std stays null
  }
  assertEquals(null, std);
}

InternalCallVerifier EqualityVerifier 
/** min over the unbounded row range for TEST_FAMILY:TEST_QUALIFIER must be 0. */
@Test(timeout=300000)
public void testMinWithValidRange() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  scan.setStartRow(HConstants.EMPTY_START_ROW);
  scan.setStopRow(HConstants.EMPTY_END_ROW);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Long min = client.min(TEST_TABLE, interpreter, scan);
  assertEquals(0L, min.longValue());
}

InternalCallVerifier EqualityVerifier 
/** rowCount with a prefix filter that matches nothing must be 0. */
@Test(timeout=300000)
public void testRowCountWithPrefixFilter() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Filter filter = new PrefixFilter(Bytes.toBytes("foo:bar"));
  scan.setFilter(filter);
  long rowCount = client.rowCount(TEST_TABLE, interpreter, scan);
  assertEquals(0, rowCount);
}

InternalCallVerifier EqualityVerifier 
/** avg with a prefix filter that matches nothing must come back NaN. */
@Test(timeout=300000)
public void testAvgWithFilter() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  Filter filter = new PrefixFilter(Bytes.toBytes("foo:bar"));
  scan.setFilter(filter);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Double average = null;
  average = client.avg(TEST_TABLE, interpreter, scan);
  assertEquals(Double.NaN, average, 0);
}

InternalCallVerifier EqualityVerifier 
/** std with a prefix filter that matches nothing must come back NaN. */
@Test(timeout=300000)
public void testStdWithFilter() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Filter filter = new PrefixFilter(Bytes.toBytes("foo:bar"));
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setFilter(filter);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Double std = null;
  std = client.std(TEST_TABLE, interpreter, scan);
  assertEquals(Double.NaN, std, 0);
}

InternalCallVerifier EqualityVerifier 
/** std over rows [5,15) of TEST_FAMILY:TEST_QUALIFIER must be about 2.87. */
@Test(timeout=300000)
public void testStdWithValidRange2() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  scan.setStartRow(ROWS[5]);
  scan.setStopRow(ROWS[15]);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  double std = client.std(TEST_TABLE, interpreter, scan);
  assertEquals(2.87, std, 0.05d);
}

InternalCallVerifier EqualityVerifier 
/** sum with a prefix filter that matches nothing must come back null. */
@Test(timeout=300000)
public void testSumWithFilter() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Filter filter = new PrefixFilter(Bytes.toBytes("foo:bar"));
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setFilter(filter);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Long sum = null;
  sum = client.sum(TEST_TABLE, interpreter, scan);
  assertEquals(null, sum);
}

InternalCallVerifier EqualityVerifier 
/** std over the whole TEST_FAMILY (no qualifier) must be about 63.42. */
@Test(timeout=300000)
public void testStdWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  double std = client.std(TEST_TABLE, interpreter, scan);
  assertEquals(63.42, std, 0.05d);
}

InternalCallVerifier EqualityVerifier 
/** avg with no column family on the scan must produce no result. */
@Test(timeout=300000)
public void testAvgWithValidRangeWithNullCF() {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Double average = null;
  try {
    average = client.avg(TEST_TABLE, interpreter, scan);
  } catch (Throwable ignored) {
    // the missing column family is expected to fail; average stays null
  }
  assertEquals(null, average);
}

InternalCallVerifier EqualityVerifier 
/** min over rows [5,15) of TEST_FAMILY:TEST_QUALIFIER must be 5. */
@Test(timeout=300000)
public void testMinWithValidRange2() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  scan.setStartRow(ROWS[5]);
  scan.setStopRow(ROWS[15]);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  long min = client.min(TEST_TABLE, interpreter, scan);
  assertEquals(5, min);
}

InternalCallVerifier EqualityVerifier 
/** std over an inverted range (start after stop) must produce no result. */
@Test(timeout=300000)
public void testStdWithInvalidRange() {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[1]);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Double std = null;
  try {
    std = client.std(TEST_TABLE, interpreter, scan);
  } catch (Throwable ignored) {
    // the invalid range is expected to fail; std stays null
  }
  assertEquals(null, std);
}

InternalCallVerifier EqualityVerifier 
/** sum over rows [6,7) of the whole TEST_FAMILY (no qualifier) must be 66 (= 6 + 60). */
@Test(timeout=300000)
public void testSumWithValidRange2WithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[7]);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  long sum = client.sum(TEST_TABLE, interpreter, scan);
  assertEquals(6 + 60, sum);
}

InternalCallVerifier EqualityVerifier 
/** avg over the full table for TEST_FAMILY:TEST_QUALIFIER must be 9.5. */
@Test(timeout=300000)
public void testAvgWithValidRange() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  double average = client.avg(TEST_TABLE, interpreter, scan);
  assertEquals(9.5, average, 0);
}

InternalCallVerifier EqualityVerifier 
/**
 * rowCount with startrow greater than endrow: the call is expected to fail, leaving the
 * sentinel result of -1 in place.
 */
@Test(timeout=300000)
public void testRowCountWithInvalidRange1() {
  AggregationClient client = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.setStartRow(ROWS[5]);
  scan.setStopRow(ROWS[2]);
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  long rowCount = -1;
  try {
    rowCount = client.rowCount(TEST_TABLE, interpreter, scan);
  } catch (Throwable e) {
    myLog.error("Exception thrown in the invalidRange method" + e.getStackTrace());
  }
  assertEquals(-1, rowCount);
}

InternalCallVerifier EqualityVerifier 
/**
 * Min with no column family added to the Scan: the call is expected to throw
 * (exception deliberately swallowed), so min must still be null.
 */
@Test(timeout=300000) public void testMinWithValidRangeWithNullCF(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.setStartRow(ROWS[5]); scan.setStopRow(ROWS[15]); final ColumnInterpreter ci=new LongColumnInterpreter(); Long min=null; try { min=aClient.min(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,min); }

InternalCallVerifier EqualityVerifier 
/**
 * Row count over the half-open range [ROWS[2], ROWS[14]) — the common
 * "subset of rows" case. Asserts exactly 12 rows are counted.
 * @throws Throwable
 */
@Test(timeout=300000)
public void testRowCountWithValidRange() throws Throwable {
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Scan rangeScan = new Scan();
  rangeScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  rangeScan.setStartRow(ROWS[2]);
  rangeScan.setStopRow(ROWS[14]);
  AggregationClient client = new AggregationClient(conf);
  long observedCount = client.rowCount(TEST_TABLE, interpreter, rangeScan);
  assertEquals(12, observedCount);
}

InternalCallVerifier EqualityVerifier 
/**
 * Max with a prefix filter ("foo:bar") that matches no rows: the scan yields
 * nothing, so max must come back null.
 * @throws Throwable
 */
@Test(timeout=300000)
public void testMaxWithFilter() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
  scan.setFilter(f);
  final ColumnInterpreter ci = new LongColumnInterpreter();
  // Assign directly from the call: the old "Long max=0l" was a dead store
  // (immediately overwritten) and used the easily-misread lowercase 'l' suffix.
  Long max = aClient.max(TEST_TABLE, ci, scan);
  assertEquals(null, max);
}

InternalCallVerifier EqualityVerifier 
/**
 * Gives max for the entire table on the qualified column; asserts the
 * maximum is 19.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMaxWithValidRange() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); final ColumnInterpreter ci=new LongColumnInterpreter(); long maximum=aClient.max(TEST_TABLE,ci,scan); assertEquals(19,maximum); }

InternalCallVerifier EqualityVerifier 
/**
 * Max over the whole table with only the family set (no qualifier); asserts
 * the maximum across all qualifiers is 190.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMaxWithValidRangeWithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); final ColumnInterpreter ci=new LongColumnInterpreter(); long maximum=aClient.max(TEST_TABLE,ci,scan); assertEquals(190,maximum); }

InternalCallVerifier EqualityVerifier 
/**
 * Std-dev over the whole table on the qualified column; asserts 5.766
 * within a 0.05 tolerance.
 * @throws Throwable
 */
@Test(timeout=300000) public void testStdWithValidRange() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); final ColumnInterpreter ci=new LongColumnInterpreter(); double std=aClient.std(TEST_TABLE,ci,scan); assertEquals(5.766,std,0.05d); }

InternalCallVerifier EqualityVerifier 
/**
 * Sum with no column family added to the Scan: the call is expected to throw
 * (exception deliberately swallowed), so sum must still be null.
 */
@Test(timeout=300000) public void testSumWithValidRangeWithNullCF(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[7]); final ColumnInterpreter ci=new LongColumnInterpreter(); Long sum=null; try { sum=aClient.sum(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,sum); }

InternalCallVerifier EqualityVerifier 
/**
 * Average over an inverted range (start ROWS[5] > stop ROWS[1]): the call is
 * expected to throw (exception deliberately swallowed), so avg stays null.
 */
@Test(timeout=300000) public void testAvgWithInvalidRange(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); scan.setStartRow(ROWS[5]); scan.setStopRow(ROWS[1]); final ColumnInterpreter ci=new LongColumnInterpreter(); Double avg=null; try { avg=aClient.avg(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,avg); }

InternalCallVerifier EqualityVerifier 
/**
 * Max over [ROWS[6], ROWS[7]) with only the family set; asserts the maximum
 * across row 6's qualifiers is 60.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMaxWithValidRange2WithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[7]); final ColumnInterpreter ci=new LongColumnInterpreter(); long max=aClient.max(TEST_TABLE,ci,scan); assertEquals(60,max); }

InternalCallVerifier EqualityVerifier 
/**
 * Row count with only a family (no column qualifier) must still count every
 * row in the table: 20.
 * @throws Throwable
 */
@Test(timeout=300000)
public void testRowCountWithNullCQ() throws Throwable {
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  Scan familyOnlyScan = new Scan();
  familyOnlyScan.addFamily(TEST_FAMILY);
  AggregationClient client = new AggregationClient(conf);
  long observedCount = client.rowCount(TEST_TABLE, interpreter, familyOnlyScan);
  assertEquals(20, observedCount);
}

InternalCallVerifier EqualityVerifier 
/**
 * Sum over an inverted range (start ROWS[6] > stop ROWS[2]): the call is
 * expected to throw (exception deliberately swallowed), so sum stays null.
 */
@Test(timeout=300000) public void testSumWithInvalidRange(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[2]); final ColumnInterpreter ci=new LongColumnInterpreter(); Long sum=null; try { sum=aClient.sum(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,sum); }

InternalCallVerifier EqualityVerifier 
/**
 * Row count over the entire table (no start or stop row set) must equal
 * ROWSIZE, the number of rows loaded by the fixture.
 * @throws Throwable
 */
@Test(timeout=300000)
public void testRowCountAllTable() throws Throwable {
  final ColumnInterpreter interpreter = new LongColumnInterpreter();
  AggregationClient client = new AggregationClient(conf);
  Scan unboundedScan = new Scan();
  long observedCount = client.rowCount(TEST_TABLE, interpreter, unboundedScan);
  assertEquals(ROWSIZE, observedCount);
}

InternalCallVerifier EqualityVerifier 
/**
 * Sum over [ROWS[5], ROWS[15]) on the qualified column; asserts the total
 * is 95 (5 + 6 + ... + 14).
 * @throws Throwable
 */
@Test(timeout=300000) public void testSumWithValidRange2() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); scan.setStartRow(ROWS[5]); scan.setStopRow(ROWS[15]); final ColumnInterpreter ci=new LongColumnInterpreter(); long sum=aClient.sum(TEST_TABLE,ci,scan); assertEquals(95,sum); }

InternalCallVerifier EqualityVerifier 
/**
 * Std-dev over the single-row range [ROWS[6], ROWS[7]) with only the family
 * set; asserts a std-dev of exactly 0.
 * @throws Throwable
 */
@Test(timeout=300000) public void testStdWithValidRange2WithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[7]); final ColumnInterpreter ci=new LongColumnInterpreter(); double std=aClient.std(TEST_TABLE,ci,scan); assertEquals(0,std,0); }

InternalCallVerifier EqualityVerifier 
/**
 * Sum over the whole table with only the family set; asserts the combined
 * total across both qualifiers, 190 + 1900.
 * @throws Throwable
 */
@Test(timeout=300000) public void testSumWithValidRangeWithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); final ColumnInterpreter ci=new LongColumnInterpreter(); long sum=aClient.sum(TEST_TABLE,ci,scan); assertEquals(190 + 1900,sum); }

InternalCallVerifier EqualityVerifier 
/**
 * Min over the full row space (explicit empty start/stop rows) with only the
 * family set; asserts the minimum is 0.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMinWithValidRangeWithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(HConstants.EMPTY_START_ROW); scan.setStopRow(HConstants.EMPTY_END_ROW); final ColumnInterpreter ci=new LongColumnInterpreter(); long min=aClient.min(TEST_TABLE,ci,scan); assertEquals(0,min); }

InternalCallVerifier EqualityVerifier 
/**
 * Average over the whole table with only the family set; asserts a mean of
 * exactly 104.5.
 * @throws Throwable
 */
@Test(timeout=300000) public void testAvgWithValidRangeWithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); final ColumnInterpreter ci=new LongColumnInterpreter(); double avg=aClient.avg(TEST_TABLE,ci,scan); assertEquals(104.5,avg,0); }

InternalCallVerifier EqualityVerifier 
/**
 * Min with start row equal to stop row (ROWS[6]) — an empty range: the call
 * is expected to throw (exception deliberately swallowed), so min stays null.
 */
@Test(timeout=300000) public void testMinWithInvalidRange2(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[6]); final ColumnInterpreter ci=new LongColumnInterpreter(); Long min=null; try { min=aClient.min(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,min); }

Class: org.apache.hadoop.hbase.coprocessor.TestBigDecimalColumnInterpreter

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal sum over [ROWS[6], ROWS[7]) with only the family set; asserts
 * the total 6.60 across row 6's qualifiers.
 * @throws Throwable
 */
@Test(timeout=300000) public void testSumWithValidRange2WithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[7]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal sum=aClient.sum(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("6.60"),sum); }

InternalCallVerifier EqualityVerifier 
/**
 * Average with no column family added to the Scan: the call is expected to
 * throw (exception deliberately swallowed), so avg must still be null.
 */
@Test(timeout=300000) public void testAvgWithValidRangeWithNullCF(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); Double avg=null; try { avg=aClient.avg(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,avg); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal average over [ROWS[5], ROWS[15]) on the qualified column;
 * asserts a mean of exactly 9.5.
 * @throws Throwable
 */
@Test(timeout=300000) public void testAvgWithValidRange2() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); scan.setStartRow(ROWS[5]); scan.setStopRow(ROWS[15]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); double avg=aClient.avg(TEST_TABLE,ci,scan); assertEquals(9.5,avg,0); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal sum over the whole table with only the family set; asserts the
 * combined total 209.00.
 * @throws Throwable
 */
@Test(timeout=300000) public void testSumWithValidRangeWithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal sum=aClient.sum(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("209.00"),sum); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal std-dev over the whole table with only the family set; asserts
 * 6.342 within a 0.05 tolerance. (Original note: "need to change this".)
 * @throws Throwable
 */
@Test(timeout=300000) public void testStdWithValidRangeWithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); double std=aClient.std(TEST_TABLE,ci,scan); assertEquals(6.342,std,0.05d); }

InternalCallVerifier EqualityVerifier 
/**
 * Min over an inverted range (start ROWS[4] > stop ROWS[2]): the call is
 * expected to throw (exception deliberately swallowed), so min stays null.
 */
@Test(timeout=300000) public void testMinWithInvalidRange(){ AggregationClient aClient=new AggregationClient(conf); BigDecimal min=null; Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[4]); scan.setStopRow(ROWS[2]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); try { min=aClient.min(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,min); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal min over [ROWS[5], ROWS[15]) on the qualified column; asserts
 * the minimum is 5.00.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMinWithValidRange2() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); scan.setStartRow(ROWS[5]); scan.setStopRow(ROWS[15]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal min=aClient.min(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("5.00"),min); }

InternalCallVerifier EqualityVerifier 
/**
 * Min with no column family added to the Scan: the call is expected to throw
 * (exception deliberately swallowed), so min must still be null.
 */
@Test(timeout=300000) public void testMinWithValidRangeWithNullCF(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.setStartRow(ROWS[5]); scan.setStopRow(ROWS[15]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal min=null; try { min=aClient.min(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,min); }

InternalCallVerifier EqualityVerifier 
/**
 * Max with a prefix filter ("foo:bar") that matches no rows: the scan yields
 * nothing, so max must come back null.
 * @throws Throwable
 */
@Test(timeout=300000)
public void testMaxWithFilter() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
  scan.setFilter(f);
  final ColumnInterpreter ci = new BigDecimalColumnInterpreter();
  // Assign directly from the call: the old "BigDecimal max=BigDecimal.ZERO"
  // was a dead store, immediately overwritten.
  BigDecimal max = aClient.max(TEST_TABLE, ci, scan);
  assertEquals(null, max);
}

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal average over the whole table on the qualified column; asserts
 * a mean of exactly 9.5.
 * @throws Throwable
 */
@Test(timeout=300000) public void testAvgWithValidRange() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); double avg=aClient.avg(TEST_TABLE,ci,scan); assertEquals(9.5,avg,0); }

InternalCallVerifier EqualityVerifier 
/**
 * Min with start row equal to stop row (ROWS[6]) — an empty range: the call
 * is expected to throw (exception deliberately swallowed), so min stays null.
 */
@Test(timeout=300000) public void testMinWithInvalidRange2(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[6]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal min=null; try { min=aClient.min(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,min); }

InternalCallVerifier EqualityVerifier 
/**
 * Gives max for the entire table on the qualified column; asserts the
 * maximum is 19.00.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMaxWithValidRange() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal maximum=aClient.max(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("19.00"),maximum); }

InternalCallVerifier EqualityVerifier 
/**
 * Sum with a prefix filter ("foo:bar") that matches no rows: the scan yields
 * nothing, so sum must come back null.
 * @throws Throwable
 */
@Test(timeout=300000)
public void testSumWithFilter() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setFilter(f);
  final ColumnInterpreter ci = new BigDecimalColumnInterpreter();
  // Assign directly from the call: the old null initialization was a dead store.
  BigDecimal sum = aClient.sum(TEST_TABLE, ci, scan);
  assertEquals(null, sum);
}

InternalCallVerifier EqualityVerifier 
/**
 * Average with a prefix filter ("foo:bar") that matches no rows: with zero
 * matching cells the mean is asserted to be NaN.
 * @throws Throwable
 */
@Test(timeout=300000)
public void testAvgWithFilter() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
  scan.setFilter(f);
  final ColumnInterpreter ci = new BigDecimalColumnInterpreter();
  // Assign directly from the call: the old null initialization was a dead store.
  Double avg = aClient.avg(TEST_TABLE, ci, scan);
  assertEquals(Double.NaN, avg, 0);
}

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal average over the whole table with only the family set; asserts
 * 10.45 within a 0.01 tolerance.
 * @throws Throwable
 */
@Test(timeout=300000) public void testAvgWithValidRangeWithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); double avg=aClient.avg(TEST_TABLE,ci,scan); assertEquals(10.45,avg,0.01); }

InternalCallVerifier EqualityVerifier 
/**
 * Std-dev with no column family added to the Scan: the call is expected to
 * throw (exception deliberately swallowed), so std must still be null.
 */
@Test(timeout=300000) public void testStdWithValidRangeWithNullCF(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[17]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); Double std=null; try { std=aClient.std(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,std); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal max over the whole table with only the family set; asserts the
 * maximum is 19.00.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMaxWithValidRangeWithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal maximum=aClient.max(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("19.00"),maximum); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal median over the whole table on the qualified column; asserts
 * the median is 8.00.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMedianWithValidRange() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal median=aClient.median(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("8.00"),median); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal max over [ROWS[6], ROWS[7]) with only the family set; asserts
 * the maximum across row 6's qualifiers is 6.00.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMaxWithValidRange2WithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[7]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal max=aClient.max(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("6.00"),max); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal sum over the whole table on the qualified column; asserts the
 * total is 190.00.
 * @throws Throwable
 */
@Test(timeout=300000) public void testSumWithValidRange() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal sum=aClient.sum(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("190.00"),sum); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal max over [ROWS[5], ROWS[15]) on the qualified column; asserts
 * the maximum is 14.00 (the last row inside the half-open range).
 * @throws Throwable
 */
@Test(timeout=300000) public void testMaxWithValidRange2() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); scan.setStartRow(ROWS[5]); scan.setStopRow(ROWS[15]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal max=aClient.max(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("14.00"),max); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal min over [ROWS[6], ROWS[7]) with only the family set; asserts
 * the minimum across row 6's qualifiers is 0.60.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMinWithValidRange2WithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[7]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal min=aClient.min(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("0.60"),min); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal sum over [ROWS[5], ROWS[15]) on the qualified column; asserts
 * the total is 95.00.
 * @throws Throwable
 */
@Test(timeout=300000) public void testSumWithValidRange2() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); scan.setStartRow(ROWS[5]); scan.setStopRow(ROWS[15]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal sum=aClient.sum(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("95.00"),sum); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal min over the full row space (explicit empty start/stop rows) on
 * the qualified column; asserts the minimum is 0.00.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMinWithValidRange() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); scan.setStartRow(HConstants.EMPTY_START_ROW); scan.setStopRow(HConstants.EMPTY_END_ROW); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal min=aClient.min(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("0.00"),min); }

InternalCallVerifier EqualityVerifier 
/**
 * Min with a prefix filter ("foo:bar") that matches no rows: the scan yields
 * nothing, so min must come back null.
 * @throws Throwable
 */
@Test(timeout=300000)
public void testMinWithFilter() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
  scan.setFilter(f);
  final ColumnInterpreter ci = new BigDecimalColumnInterpreter();
  // Assign directly from the call: the old null initialization was a dead store.
  BigDecimal min = aClient.min(TEST_TABLE, ci, scan);
  assertEquals(null, min);
}

InternalCallVerifier EqualityVerifier 
/**
 * Average over an inverted range (start ROWS[5] > stop ROWS[1]): the call is
 * expected to throw (exception deliberately swallowed), so avg stays null.
 */
@Test(timeout=300000) public void testAvgWithInvalidRange(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); scan.setStartRow(ROWS[5]); scan.setStopRow(ROWS[1]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); Double avg=null; try { avg=aClient.avg(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,avg); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal min over the full row space with only the family set; asserts
 * the minimum is 0.00.
 * @throws Throwable
 */
@Test(timeout=300000) public void testMinWithValidRangeWithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(HConstants.EMPTY_START_ROW); scan.setStopRow(HConstants.EMPTY_END_ROW); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal min=aClient.min(TEST_TABLE,ci,scan); assertEquals(new BigDecimal("0.00"),min); }

InternalCallVerifier EqualityVerifier 
/**
 * Std-dev over an inverted range (start ROWS[6], stop ROWS[1]): the call is
 * expected to throw — the Throwable is deliberately swallowed — so std must
 * still be null afterwards.
 */
// CONSISTENCY FIX: added the timeout=300000 that every sibling test in this
// class carries, so a hang here fails fast instead of stalling the suite.
@Test(timeout=300000)
public void testStdWithInvalidRange() {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[1]); // stop row precedes start row -> invalid range
  final ColumnInterpreter ci = new BigDecimalColumnInterpreter();
  Double std = null;
  try {
    std = aClient.std(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: the endpoint rejects the inverted range
  }
  assertEquals(null, std);
}

InternalCallVerifier EqualityVerifier 
/**
 * Std-dev with a prefix filter ("foo:bar") that matches no rows: with zero
 * matching cells the result is asserted to be NaN.
 * @throws Throwable
 */
@Test(timeout=300000)
public void testStdWithFilter() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setFilter(f);
  final ColumnInterpreter ci = new BigDecimalColumnInterpreter();
  // Assign directly from the call: the old null initialization was a dead store.
  Double std = aClient.std(TEST_TABLE, ci, scan);
  assertEquals(Double.NaN, std, 0);
}

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal std-dev over [ROWS[5], ROWS[15]) on the qualified column;
 * asserts 2.87 within a 0.05 tolerance. (Original note: "need to change this".)
 * @throws Throwable
 */
@Test(timeout=300000) public void testStdWithValidRange2() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); scan.setStartRow(ROWS[5]); scan.setStopRow(ROWS[15]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); double std=aClient.std(TEST_TABLE,ci,scan); assertEquals(2.87,std,0.05d); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal std-dev over the whole table on the qualified column; asserts
 * 5.766 within a 0.05 tolerance.
 * @throws Throwable
 */
@Test(timeout=300000) public void testStdWithValidRange() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addColumn(TEST_FAMILY,TEST_QUALIFIER); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); double std=aClient.std(TEST_TABLE,ci,scan); assertEquals(5.766,std,0.05d); }

InternalCallVerifier EqualityVerifier 
/**
 * Sum with no column family added to the Scan: the call is expected to throw
 * (exception deliberately swallowed), so sum must still be null.
 */
@Test(timeout=300000) public void testSumWithValidRangeWithNullCF(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[7]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal sum=null; try { sum=aClient.sum(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,sum); }

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal std-dev over the single-row range [ROWS[6], ROWS[7]) with only
 * the family set; asserts 0 within a 0.05 tolerance.
 * @throws Throwable
 */
@Test(timeout=300000)
public void testStdWithValidRange2WithNoCQ() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[7]);
  final ColumnInterpreter ci = new BigDecimalColumnInterpreter();
  double std = aClient.std(TEST_TABLE, ci, scan);
  // Removed the leftover "System.out.println" debug statement; the assertion
  // message is sufficient on failure.
  assertEquals(0, std, 0.05d);
}

InternalCallVerifier EqualityVerifier 
/**
 * BigDecimal average over [ROWS[6], ROWS[7]) with only the family set;
 * asserts the mean of row 6's two qualifiers, (6 + 0.60).
 * @throws Throwable
 */
@Test(timeout=300000) public void testAvgWithValidRange2WithNoCQ() throws Throwable { AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[7]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); double avg=aClient.avg(TEST_TABLE,ci,scan); assertEquals(6 + 0.60,avg,0); }

InternalCallVerifier EqualityVerifier 
/**
 * Sum over an inverted range (start ROWS[6] > stop ROWS[2]): the call is
 * expected to throw (exception deliberately swallowed), so sum stays null.
 */
@Test(timeout=300000) public void testSumWithInvalidRange(){ AggregationClient aClient=new AggregationClient(conf); Scan scan=new Scan(); scan.addFamily(TEST_FAMILY); scan.setStartRow(ROWS[6]); scan.setStopRow(ROWS[2]); final ColumnInterpreter ci=new BigDecimalColumnInterpreter(); BigDecimal sum=null; try { sum=aClient.sum(TEST_TABLE,ci,scan); } catch ( Throwable e) { } assertEquals(null,sum); }

Class: org.apache.hadoop.hbase.coprocessor.TestClassLoading

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * HBASE-3810 scenario: coprocessor table-attribute keys are matched
 * case-insensitively and tolerate surrounding whitespace, and coprocessors
 * can also be registered via addCoprocessor() with an optional key/value
 * configuration that must surface in the coprocessor's environment.
 * @throws Exception
 */
@Test
public void testHBase3810() throws Exception {
  // One jar per coprocessor class under test.
  File jarFile1 = buildCoprocessorJar(cpName1);
  File jarFile2 = buildCoprocessorJar(cpName2);
  File jarFile5 = buildCoprocessorJar(cpName5);
  File jarFile6 = buildCoprocessorJar(cpName6);
  // Keys deliberately vary in case and padding to exercise lenient parsing.
  String cpKey1 = "COPROCESSOR$1";
  String cpKey2 = " Coprocessor$2 ";
  String cpKey3 = " coprocessor$03 ";
  String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER;
  String cpValue2 = getLocalPath(jarFile2) + " | " + cpName2 + " | ";
  // Empty jar path: the class must be loadable from the default classpath.
  String cpValue3 = " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v ";
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("test"));
  htd.setValue(cpKey1, cpValue1);
  htd.setValue(cpKey2, cpValue2);
  htd.setValue(cpKey3, cpValue3);
  htd.addCoprocessor(cpName5, new Path(getLocalPath(jarFile5)), Coprocessor.PRIORITY_USER, null);
  Map kvs = new HashMap();
  kvs.put("k1", "v1");
  kvs.put("k2", "v2");
  kvs.put("k3", "v3");
  htd.addCoprocessor(cpName6, new Path(getLocalPath(jarFile6)), Coprocessor.PRIORITY_USER, kvs);
  Admin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  admin.createTable(htd);
  waitForTable(htd.getTableName());
  boolean found_2 = false, found_1 = false, found_3 = false, found_5 = false, found_6 = false;
  boolean found6_k1 = false, found6_k2 = false, found6_k3 = false, found6_k4 = false;
  MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (Region region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
      // OR-accumulate across regions: each class only has to show up somewhere.
      found_1 = found_1 || (region.getCoprocessorHost().findCoprocessor(cpName1) != null);
      found_2 = found_2 || (region.getCoprocessorHost().findCoprocessor(cpName2) != null);
      found_3 = found_3 || (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") != null);
      found_5 = found_5 || (region.getCoprocessorHost().findCoprocessor(cpName5) != null);
      CoprocessorEnvironment env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName6);
      if (env != null) {
        found_6 = true;
        Configuration conf = env.getConfiguration();
        found6_k1 = conf.get("k1") != null;
        found6_k2 = conf.get("k2") != null;
        found6_k3 = conf.get("k3") != null;
        // BUG FIX: found6_k4 was never assigned, which made the assertFalse
        // below vacuous. Actually probe the environment for the key that was
        // NOT configured so the assertion checks something real.
        found6_k4 = conf.get("k4") != null;
      }
    }
  }
  assertTrue("Class " + cpName1 + " was missing on a region", found_1);
  assertTrue("Class " + cpName2 + " was missing on a region", found_2);
  assertTrue("Class SimpleRegionObserver was missing on a region", found_3);
  assertTrue("Class " + cpName5 + " was missing on a region", found_5);
  assertTrue("Class " + cpName6 + " was missing on a region", found_6);
  assertTrue("Configuration key 'k1' was missing on a region", found6_k1);
  assertTrue("Configuration key 'k2' was missing on a region", found6_k2);
  assertTrue("Configuration key 'k3' was missing on a region", found6_k3);
  assertFalse("Configuration key 'k4' wasn't configured", found6_k4);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Copies two coprocessor jars into HDFS, attaches them to a new multi-region
 * table via COPROCESSOR$N table attributes (the second with k1..k3 config),
 * and verifies: both coprocessors are present on every table region, the
 * three config keys are visible in cpName2's environment, both HDFS jar
 * paths are cached by CoprocessorClassLoader (exactly two cached loaders),
 * and every region's active external classloaders come from that cache.
 * Note the found* flags start true and are AND-ed per region, so a single
 * region missing a coprocessor fails the assertion.
 * @throws Exception
 */
@Test public void testClassLoadingFromHDFS() throws Exception { FileSystem fs=cluster.getFileSystem(); File jarFile1=buildCoprocessorJar(cpName1); File jarFile2=buildCoprocessorJar(cpName2); fs.copyFromLocalFile(new Path(jarFile1.getPath()),new Path(fs.getUri().toString() + Path.SEPARATOR)); String jarFileOnHDFS1=fs.getUri().toString() + Path.SEPARATOR + jarFile1.getName(); Path pathOnHDFS1=new Path(jarFileOnHDFS1); assertTrue("Copy jar file to HDFS failed.",fs.exists(pathOnHDFS1)); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS1); fs.copyFromLocalFile(new Path(jarFile2.getPath()),new Path(fs.getUri().toString() + Path.SEPARATOR)); String jarFileOnHDFS2=fs.getUri().toString() + Path.SEPARATOR + jarFile2.getName(); Path pathOnHDFS2=new Path(jarFileOnHDFS2); assertTrue("Copy jar file to HDFS failed.",fs.exists(pathOnHDFS2)); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2); HTableDescriptor htd=new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("test")); htd.setValue("COPROCESSOR$1",jarFileOnHDFS1.toString() + "|" + cpName1+ "|"+ Coprocessor.PRIORITY_USER); htd.setValue("COPROCESSOR$2",jarFileOnHDFS2.toString() + "|" + cpName2+ "|"+ Coprocessor.PRIORITY_USER+ "|k1=v1,k2=v2,k3=v3"); Admin admin=TEST_UTIL.getHBaseAdmin(); if (admin.tableExists(tableName)) { if (admin.isTableEnabled(tableName)) { admin.disableTable(tableName); } admin.deleteTable(tableName); } CoprocessorClassLoader.clearCache(); byte[] startKey={10,63}; byte[] endKey={12,43}; admin.createTable(htd,startKey,endKey,4); waitForTable(htd.getTableName()); boolean foundTableRegion=false; boolean found1=true, found2=true, found2_k1=true, found2_k2=true, found2_k3=true; Map> regionsActiveClassLoaders=new HashMap>(); MiniHBaseCluster hbase=TEST_UTIL.getHBaseCluster(); for ( Region region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { foundTableRegion=true; 
CoprocessorEnvironment env; env=region.getCoprocessorHost().findCoprocessorEnvironment(cpName1); found1=found1 && (env != null); env=region.getCoprocessorHost().findCoprocessorEnvironment(cpName2); found2=found2 && (env != null); if (env != null) { Configuration conf=env.getConfiguration(); found2_k1=found2_k1 && (conf.get("k1") != null); found2_k2=found2_k2 && (conf.get("k2") != null); found2_k3=found2_k3 && (conf.get("k3") != null); } else { found2_k1=found2_k2=found2_k3=false; } regionsActiveClassLoaders.put(region,((CoprocessorHost)region.getCoprocessorHost()).getExternalClassLoaders()); } } assertTrue("No region was found for table " + tableName,foundTableRegion); assertTrue("Class " + cpName1 + " was missing on a region",found1); assertTrue("Class " + cpName2 + " was missing on a region",found2); assertTrue("Configuration key 'k1' was missing on a region",found2_k1); assertTrue("Configuration key 'k2' was missing on a region",found2_k2); assertTrue("Configuration key 'k3' was missing on a region",found2_k3); assertNotNull(jarFileOnHDFS1 + " was not cached",CoprocessorClassLoader.getIfCached(pathOnHDFS1)); assertNotNull(jarFileOnHDFS2 + " was not cached",CoprocessorClassLoader.getIfCached(pathOnHDFS2)); assertEquals("The number of cached classloaders should be equal to the number" + " of external jar files",2,CoprocessorClassLoader.getAllCached().size()); Set externalClassLoaders=new HashSet(CoprocessorClassLoader.getAllCached()); for ( Map.Entry> regionCP : regionsActiveClassLoaders.entrySet()) { assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached."+ " ClassLoader Cache:"+ externalClassLoaders+ " Region ClassLoaders:"+ regionCP.getValue(),externalClassLoaders.containsAll(regionCP.getValue())); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * The master must report exactly the coprocessor registered by this test
 * class; the comparison uses the bracketed Arrays.toString() rendering.
 */
@Test
public void testMasterCoprocessorsReported() {
  String reported = java.util.Arrays.toString(
      TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors());
  final String expected = "[" + masterCoprocessor.getSimpleName() + "]";
  assertEquals(expected, reported);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * findCoprocessors(MasterObserver.class) on the master's coprocessor host
 * must return a non-empty list whose first entry is the coprocessor this
 * test class registered.
 */
@Test
public void testFindCoprocessors() {
  CoprocessorHost host = TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost();
  List observers = host.findCoprocessors(MasterObserver.class);
  assertTrue(observers != null && observers.size() > 0);
  assertEquals(masterCoprocessor.getSimpleName(), observers.get(0).getClass().getSimpleName());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Attaches a coprocessor whose jar lives on the local filesystem via the
 * COPROCESSOR$1 table attribute and verifies the class is loaded on a region
 * of the new table.
 * @throws Exception
 */
@Test
public void testClassLoadingFromLocalFS() throws Exception {
  File jarFile = buildCoprocessorJar(cpName3);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(cpName3));
  htd.addFamily(new HColumnDescriptor("test"));
  htd.setValue("COPROCESSOR$1",
      getLocalPath(jarFile) + "|" + cpName3 + "|" + Coprocessor.PRIORITY_USER);
  Admin admin = TEST_UTIL.getHBaseAdmin();
  admin.createTable(htd);
  waitForTable(htd.getTableName());
  boolean found = false;
  MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (Region region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) {
      // BUG FIX: OR-accumulate like testHBase3810 does. The previous direct
      // assignment let a later matching region without the coprocessor
      // clobber an earlier positive result.
      found = found || (region.getCoprocessorHost().findCoprocessor(cpName3) != null);
    }
  }
  assertTrue("Class " + cpName3 + " was missing on a region", found);
}

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a table coprocessor is loaded through its own private
 * {@code CoprocessorClassLoader} rather than the system classloader.
 * @throws Exception on table creation or cluster access failure
 */
@Test
public void testPrivateClassLoader() throws Exception {
  File jarFile = buildCoprocessorJar(cpName4);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(cpName4));
  htd.addFamily(new HColumnDescriptor("test"));
  htd.setValue("COPROCESSOR$1",
      getLocalPath(jarFile) + "|" + cpName4 + "|" + Coprocessor.PRIORITY_USER);
  Admin admin = TEST_UTIL.getHBaseAdmin();
  admin.createTable(htd);
  waitForTable(htd.getTableName());
  boolean found = false;
  MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (Region region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName4)) {
      Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4);
      if (cp != null) {
        found = true;
        // assertEquals(message, expected, actual): the expected value is
        // CoprocessorClassLoader.class; the original had expected/actual swapped,
        // which produces a misleading failure message.
        assertEquals("Class " + cpName4 + " was not loaded by CoprocessorClassLoader",
            CoprocessorClassLoader.class, cp.getClass().getClassLoader().getClass());
      }
    }
  }
  assertTrue("Class " + cpName4 + " was missing on a region", found);
}

Class: org.apache.hadoop.hbase.coprocessor.TestCoprocessorEndpoint

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Invokes the protobuf echo endpoint over two key ranges via Table.coprocessorService
// and checks the per-region callback results: 3 regions answer for the full range
// [ROWS[0], ROWS[last]], then 2 regions for [ROWS[rowSeperator1], ROWS[last]].
// NOTE(review): the 'controller' created before the first batch is captured and reused
// inside the second batch's Batch.Call as well — confirm that reuse is intentional.
@Test public void testCoprocessorService() throws Throwable { Table table=util.getConnection().getTable(TEST_TABLE); List regions; try (RegionLocator rl=util.getConnection().getRegionLocator(TEST_TABLE)){ regions=rl.getAllRegionLocations(); } final TestProtos.EchoRequestProto request=TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); final Map results=Collections.synchronizedMap(new TreeMap(Bytes.BYTES_COMPARATOR)); try { final RpcController controller=new ServerRpcController(); table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class,ROWS[0],ROWS[ROWS.length - 1],new Batch.Call(){ public TestProtos.EchoResponseProto call( TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); BlockingRpcCallback callback=new BlockingRpcCallback(); instance.echo(controller,request,callback); TestProtos.EchoResponseProto response=callback.get(); LOG.debug("Batch.Call returning result " + response); return response; } } ,new Batch.Callback(){ public void update( byte[] region, byte[] row, TestProtos.EchoResponseProto result){ assertNotNull(result); assertEquals("hello",result.getMessage()); results.put(region,result.getMessage()); } } ); for ( Map.Entry e : results.entrySet()) { LOG.info("Got value " + e.getValue() + " for region "+ Bytes.toStringBinary(e.getKey())); } assertEquals(3,results.size()); for ( HRegionLocation info : regions) { LOG.info("Region info is " + info.getRegionInfo().getRegionNameAsString()); assertTrue(results.containsKey(info.getRegionInfo().getRegionName())); } results.clear(); table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class,ROWS[rowSeperator1],ROWS[ROWS.length - 1],new Batch.Call(){ public TestProtos.EchoResponseProto call( TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); 
BlockingRpcCallback callback=new BlockingRpcCallback(); instance.echo(controller,request,callback); TestProtos.EchoResponseProto response=callback.get(); LOG.debug("Batch.Call returning result " + response); return response; } } ,new Batch.Callback(){ public void update( byte[] region, byte[] row, TestProtos.EchoResponseProto result){ assertNotNull(result); assertEquals("hello",result.getMessage()); results.put(region,result.getMessage()); } } ); for ( Map.Entry e : results.entrySet()) { LOG.info("Got value " + e.getValue() + " for region "+ Bytes.toStringBinary(e.getKey())); } assertEquals(2,results.size()); } finally { table.close(); } }

InternalCallVerifier EqualityVerifier 
/** Invokes the echo endpoint through the master's coprocessor service channel. */
@Test
public void testMasterCoprocessorService() throws Throwable {
  Admin admin = util.getHBaseAdmin();
  final TestProtos.EchoRequestProto request =
      TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build();
  // Blocking stub bound to the master-side endpoint.
  TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface stub =
      TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(admin.coprocessorService());
  assertEquals("hello", stub.echo(null, request).getMessage());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Exercises Table.coprocessorService when the Batch.Call deliberately returns null:
// every region must still appear as a key in the result map, each mapped to null.
// The echo RPC is still executed (and logged) before null is returned.
@Test public void testCoprocessorServiceNullResponse() throws Throwable { Table table=util.getConnection().getTable(TEST_TABLE); List regions; try (RegionLocator rl=util.getConnection().getRegionLocator(TEST_TABLE)){ regions=rl.getAllRegionLocations(); } final TestProtos.EchoRequestProto request=TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); try { final RpcController controller=new ServerRpcController(); Map results=table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class,ROWS[0],ROWS[ROWS.length - 1],new Batch.Call(){ public String call( TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { BlockingRpcCallback callback=new BlockingRpcCallback(); instance.echo(controller,request,callback); TestProtos.EchoResponseProto response=callback.get(); LOG.debug("Batch.Call got result " + response); return null; } } ); for ( Map.Entry e : results.entrySet()) { LOG.info("Got value " + e.getValue() + " for region "+ Bytes.toStringBinary(e.getKey())); } assertEquals(3,results.size()); for ( HRegionLocation region : regions) { HRegionInfo info=region.getRegionInfo(); LOG.info("Region info is " + info.getRegionNameAsString()); assertTrue(results.containsKey(info.getRegionName())); assertNull(results.get(info.getRegionName())); } } finally { table.close(); } }

Class: org.apache.hadoop.hbase.coprocessor.TestCoprocessorHost

InternalCallVerifier EqualityVerifier 
// Configures the same system coprocessor class three times under one key and checks
// that CoprocessorHost.loadSystemCoprocessors de-duplicates: exactly one instance is
// kept. The anonymous host supplies a minimal stub environment whose only meaningful
// member is the captured Configuration; all other accessors return fixed defaults.
@Test public void testDoubleLoading(){ final Configuration conf=HBaseConfiguration.create(); CoprocessorHost host=new CoprocessorHost(new TestAbortable()){ final Configuration cpHostConf=conf; @Override public CoprocessorEnvironment createEnvironment( Class implClass, final Coprocessor instance, int priority, int sequence, Configuration conf){ return new CoprocessorEnvironment(){ final Coprocessor envInstance=instance; @Override public int getVersion(){ return 0; } @Override public String getHBaseVersion(){ return "0.0.0"; } @Override public Coprocessor getInstance(){ return envInstance; } @Override public int getPriority(){ return 0; } @Override public int getLoadSequence(){ return 0; } @Override public Configuration getConfiguration(){ return cpHostConf; } @Override public Table getTable( TableName tableName) throws IOException { return null; } @Override public Table getTable( TableName tableName, ExecutorService service) throws IOException { return null; } @Override public ClassLoader getClassLoader(){ return null; } } ; } } ; final String key="KEY"; final String coprocessor="org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; conf.setStrings(key,coprocessor,coprocessor,coprocessor); host.loadSystemCoprocessors(conf,key); Assert.assertEquals(1,host.coprocessors.size()); }

Class: org.apache.hadoop.hbase.coprocessor.TestCoprocessorInterface

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Verifies the shared-data map contract between coprocessor instances: the two
// coprocessor classes get distinct maps, but all regions hosting the same class share
// one map instance (compared by identity, ==). Triggering a DoNotRetryIOException on
// each region unloads CoprocessorII; after the last CoprocessorII reference is gone a
// System.gc() plus region reopen must yield a *fresh* shared map for it (o3 != o2),
// while CoprocessorImpl — never unloaded — keeps its original map ("test1" still == o).
// NOTE(review): the assertion after System.gc() relies on the collector actually
// clearing the old shared map; this is the upstream test's assumption, not a guarantee.
@Test public void testSharedData() throws IOException { TableName tableName=TableName.valueOf(name.getMethodName()); byte[][] families={fam1,fam2,fam3}; Configuration hc=initSplit(); Region region=initHRegion(tableName,name.getMethodName(),hc,new Class[]{},families); for (int i=0; i < 3; i++) { HBaseTestCase.addContent(region,fam3); region.flush(true); } region.compact(false); byte[] splitRow=((HRegion)region).checkSplit(); assertNotNull(splitRow); Region[] regions=split(region,splitRow); for (int i=0; i < regions.length; i++) { regions[i]=reopenRegion(regions[i],CoprocessorImpl.class,CoprocessorII.class); } Coprocessor c=regions[0].getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName()); Coprocessor c2=regions[0].getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName()); Object o=((CoprocessorImpl)c).getSharedData().get("test1"); Object o2=((CoprocessorII)c2).getSharedData().get("test2"); assertNotNull(o); assertNotNull(o2); assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData()); for (int i=1; i < regions.length; i++) { c=regions[i].getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName()); c2=regions[i].getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName()); assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o); assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2); } for (int i=0; i < regions.length; i++) { try { byte[] r=regions[i].getRegionInfo().getStartKey(); if (r == null || r.length <= 0) { r=new byte[]{0}; } Get g=new Get(r); regions[i].get(g); fail(); } catch ( org.apache.hadoop.hbase.DoNotRetryIOException xc) { } assertNull(regions[i].getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName())); } c=regions[0].getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName()); assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o); c=c2=null; System.gc(); 
region=reopenRegion(regions[0],CoprocessorImpl.class,CoprocessorII.class); c=region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName()); assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o); c2=region.getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName()); Object o3=((CoprocessorII)c2).getSharedData().get("test2"); assertFalse(o3 == o2); HBaseTestingUtility.closeRegionAndWAL(region); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Drives a region through flush, compact and split with CoprocessorImpl attached,
// then asserts every lifecycle hook flag (started/stopped/opened/closed/flushed/
// compacted/split) was set. Also checks postScannerOpen wraps the scanner in
// CustomScanner. NOTE(review): findCoprocessor is called on 'region' *after*
// HBaseTestingUtility.closeRegionAndWAL(region) — this works only because the
// coprocessor host object outlives the close; confirm that is intentional rather
// than querying one of the still-open split regions.
@Test public void testCoprocessorInterface() throws IOException { TableName tableName=TableName.valueOf(name.getMethodName()); byte[][] families={fam1,fam2,fam3}; Configuration hc=initSplit(); Region region=initHRegion(tableName,name.getMethodName(),hc,new Class[]{CoprocessorImpl.class},families); for (int i=0; i < 3; i++) { HBaseTestCase.addContent(region,fam3); region.flush(true); } region.compact(false); byte[] splitRow=((HRegion)region).checkSplit(); assertNotNull(splitRow); Region[] regions=split(region,splitRow); for (int i=0; i < regions.length; i++) { regions[i]=reopenRegion(regions[i],CoprocessorImpl.class); } HBaseTestingUtility.closeRegionAndWAL(region); Coprocessor c=region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName()); Scan s=new Scan(); RegionScanner scanner=regions[0].getCoprocessorHost().postScannerOpen(s,regions[0].getScanner(s)); assertTrue(scanner instanceof CustomScanner); scanner.next(new ArrayList()); assertTrue("Coprocessor not started",((CoprocessorImpl)c).wasStarted()); assertTrue("Coprocessor not stopped",((CoprocessorImpl)c).wasStopped()); assertTrue(((CoprocessorImpl)c).wasOpened()); assertTrue(((CoprocessorImpl)c).wasClosed()); assertTrue(((CoprocessorImpl)c).wasFlushed()); assertTrue(((CoprocessorImpl)c).wasCompacted()); assertTrue(((CoprocessorImpl)c).wasSplit()); for (int i=0; i < regions.length; i++) { HBaseTestingUtility.closeRegionAndWAL(regions[i]); c=region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName()); assertTrue("Coprocessor not started",((CoprocessorImpl)c).wasStarted()); assertTrue("Coprocessor not stopped",((CoprocessorImpl)c).wasStopped()); assertTrue(((CoprocessorImpl)c).wasOpened()); assertTrue(((CoprocessorImpl)c).wasClosed()); assertTrue(((CoprocessorImpl)c).wasCompacted()); } }

Class: org.apache.hadoop.hbase.coprocessor.TestCoprocessorStop

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Shuts the mini cluster down and checks that the master and region-server
 * stop-hook coprocessors each left their flag file on the test filesystem.
 */
@Test
public void testStopped() throws Exception {
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  LOG.info("shutdown hbase cluster...");
  cluster.shutdown();
  LOG.info("wait for the hbase cluster shutdown...");
  cluster.waitUntilShutDown();
  Configuration conf = UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  // Each stop hook writes a marker file as proof it ran during shutdown.
  Path flagFile = new Path(UTIL.getDataTestDirOnTestFS(), MASTER_FILE);
  assertTrue("Master flag file should have been created", fs.exists(flagFile));
  flagFile = new Path(UTIL.getDataTestDirOnTestFS(), REGIONSERVER_FILE);
  assertTrue("RegionServer flag file should have been created", fs.exists(flagFile));
}

Class: org.apache.hadoop.hbase.coprocessor.TestDoubleColumnInterpreter

InternalCallVerifier EqualityVerifier 
/** Standard deviation over rows [5, 15) of one column should be roughly 2.87. */
@Test(timeout = 300000)
public void testStdWithValidRange2() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  rangeScan.setStartRow(ROWS[5]);
  rangeScan.setStopRow(ROWS[15]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double std = client.std(TEST_TABLE, interpreter, rangeScan);
  assertEquals(2.87, std, 0.05d);
}

InternalCallVerifier EqualityVerifier 
/** min() over an inverted range (start row after stop row) must fail, leaving null. */
@Test(timeout = 300000)
public void testMinWithInvalidRange() {
  AggregationClient client = new AggregationClient(conf);
  Double result = null;
  Scan rangeScan = new Scan();
  rangeScan.addFamily(TEST_FAMILY);
  rangeScan.setStartRow(ROWS[4]);
  rangeScan.setStopRow(ROWS[2]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  try {
    result = client.min(TEST_TABLE, interpreter, rangeScan);
  } catch (Throwable expected) {
    // invalid range is supposed to throw; result stays null
  }
  assertEquals(null, result);
}

InternalCallVerifier EqualityVerifier 
/** Std-dev over the single-row range [6, 7) without a qualifier should be 0. */
@Test(timeout = 300000)
public void testStdWithValidRange2WithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addFamily(TEST_FAMILY);
  rangeScan.setStartRow(ROWS[6]);
  rangeScan.setStopRow(ROWS[7]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double std = client.std(TEST_TABLE, interpreter, rangeScan);
  System.out.println("std is:" + std);
  assertEquals(0, std, 0.05d);
}

InternalCallVerifier EqualityVerifier 
/** A prefix filter that matches no rows must make sum() return null. */
@Test(timeout = 300000)
public void testSumWithFilter() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Filter noMatch = new PrefixFilter(Bytes.toBytes("foo:bar"));
  Scan filteredScan = new Scan();
  filteredScan.addFamily(TEST_FAMILY);
  filteredScan.setFilter(noMatch);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  Double total = client.sum(TEST_TABLE, interpreter, filteredScan);
  assertEquals(null, total);
}

InternalCallVerifier EqualityVerifier 
/** Average over the whole family (no column qualifier) should be 10.45. */
@Test(timeout = 300000)
public void testAvgWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan familyScan = new Scan();
  familyScan.addFamily(TEST_FAMILY);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double average = client.avg(TEST_TABLE, interpreter, familyScan);
  assertEquals(10.45, average, 0.01);
}

InternalCallVerifier EqualityVerifier 
/** Median of the full column over the whole table should be exactly 8.00. */
@Test(timeout = 300000)
public void testMedianWithValidRange() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan fullScan = new Scan();
  fullScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double median = client.median(TEST_TABLE, interpreter, fullScan);
  assertEquals(8.00, median, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** Sum over rows [5, 15) of one column should be exactly 95.00. */
@Test(timeout = 300000)
public void testSumWithValidRange2() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  rangeScan.setStartRow(ROWS[5]);
  rangeScan.setStopRow(ROWS[15]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double total = client.sum(TEST_TABLE, interpreter, rangeScan);
  assertEquals(95.00, total, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** Minimum over the single-row range [6, 7) without a qualifier should be 0.60. */
@Test(timeout = 300000)
public void testMinWithValidRange2WithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addFamily(TEST_FAMILY);
  rangeScan.setStartRow(ROWS[6]);
  rangeScan.setStopRow(ROWS[7]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double minimum = client.min(TEST_TABLE, interpreter, rangeScan);
  assertEquals(0.60, minimum, 0.001);
}

InternalCallVerifier EqualityVerifier 
/** Maximum over rows [5, 15) of one column should be exactly 14.00. */
@Test(timeout = 300000)
public void testMaxWithValidRange2() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  rangeScan.setStartRow(ROWS[5]);
  rangeScan.setStopRow(ROWS[15]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double maximum = client.max(TEST_TABLE, interpreter, rangeScan);
  assertEquals(14.00, maximum, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** Average over rows [5, 15) of one column should be exactly 9.5. */
@Test(timeout = 300000)
public void testAvgWithValidRange2() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  rangeScan.setStartRow(ROWS[5]);
  rangeScan.setStopRow(ROWS[15]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double average = client.avg(TEST_TABLE, interpreter, rangeScan);
  assertEquals(9.5, average, 0);
}

InternalCallVerifier EqualityVerifier 
/** avg() with no column family on the scan must fail, leaving null. */
@Test(timeout = 300000)
public void testAvgWithValidRangeWithNullCF() {
  AggregationClient client = new AggregationClient(conf);
  Double result = null;
  Scan bareScan = new Scan();
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  try {
    result = client.avg(TEST_TABLE, interpreter, bareScan);
  } catch (Throwable expected) {
    // missing column family is supposed to throw; result stays null
  }
  assertEquals(null, result);
}

InternalCallVerifier EqualityVerifier 
/** sum() with no column family on the scan must fail, leaving null. */
@Test(timeout = 300000)
public void testSumWithValidRangeWithNullCF() {
  AggregationClient client = new AggregationClient(conf);
  Double result = null;
  Scan bareScan = new Scan();
  bareScan.setStartRow(ROWS[6]);
  bareScan.setStopRow(ROWS[7]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  try {
    result = client.sum(TEST_TABLE, interpreter, bareScan);
  } catch (Throwable expected) {
    // missing column family is supposed to throw; result stays null
  }
  assertEquals(null, result);
}

InternalCallVerifier EqualityVerifier 
/** Maximum over the single-row range [6, 7) without a qualifier should be 6.00. */
@Test(timeout = 300000)
public void testMaxWithValidRange2WithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addFamily(TEST_FAMILY);
  rangeScan.setStartRow(ROWS[6]);
  rangeScan.setStopRow(ROWS[7]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double maximum = client.max(TEST_TABLE, interpreter, rangeScan);
  assertEquals(6.00, maximum, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** Sum of the full column over the whole table should be exactly 190.00. */
@Test(timeout = 300000)
public void testSumWithValidRange() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan fullScan = new Scan();
  fullScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double total = client.sum(TEST_TABLE, interpreter, fullScan);
  assertEquals(190.00, total, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** Maximum of the full column over the entire table should be exactly 19.00. */
@Test(timeout = 300000)
public void testMaxWithValidRange() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan fullScan = new Scan();
  fullScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double maximum = client.max(TEST_TABLE, interpreter, fullScan);
  assertEquals(19.00, maximum, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** Minimum over the whole table (no qualifier, empty start/stop) should be 0.00. */
@Test(timeout = 300000)
public void testMinWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan fullScan = new Scan();
  fullScan.addFamily(TEST_FAMILY);
  fullScan.setStartRow(HConstants.EMPTY_START_ROW);
  fullScan.setStopRow(HConstants.EMPTY_END_ROW);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double minimum = client.min(TEST_TABLE, interpreter, fullScan);
  assertEquals(0.00, minimum, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** Minimum of the full column over the whole table should be exactly 0.00. */
@Test(timeout = 300000)
public void testMinWithValidRange() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan fullScan = new Scan();
  fullScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  fullScan.setStartRow(HConstants.EMPTY_START_ROW);
  fullScan.setStopRow(HConstants.EMPTY_END_ROW);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double minimum = client.min(TEST_TABLE, interpreter, fullScan);
  assertEquals(0.00, minimum, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** avg() over a filter matching no rows returns NaN (0/0), not an exception. */
@Test(timeout = 300000)
public void testAvgWithFilter() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan filteredScan = new Scan();
  filteredScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  Filter noMatch = new PrefixFilter(Bytes.toBytes("foo:bar"));
  filteredScan.setFilter(noMatch);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  Double average = client.avg(TEST_TABLE, interpreter, filteredScan);
  assertEquals(Double.NaN, average, 0);
}

InternalCallVerifier EqualityVerifier 
/** Sum over the whole family (no qualifier) should be exactly 209.00. */
@Test(timeout = 300000)
public void testSumWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan familyScan = new Scan();
  familyScan.addFamily(TEST_FAMILY);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double total = client.sum(TEST_TABLE, interpreter, familyScan);
  assertEquals(209.00, total, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** Maximum over the whole family (no qualifier) should be exactly 19.00. */
@Test(timeout = 300000)
public void testMaxWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan familyScan = new Scan();
  familyScan.addFamily(TEST_FAMILY);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double maximum = client.max(TEST_TABLE, interpreter, familyScan);
  assertEquals(19.00, maximum, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** Average over the single-row range [6, 7) without a qualifier should be 6.60. */
@Test(timeout = 300000)
public void testAvgWithValidRange2WithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addFamily(TEST_FAMILY);
  rangeScan.setStartRow(ROWS[6]);
  rangeScan.setStopRow(ROWS[7]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double average = client.avg(TEST_TABLE, interpreter, rangeScan);
  // Keep the original arithmetic form so the expected double is bit-identical.
  assertEquals(6 + 0.60, average, 0);
}

InternalCallVerifier EqualityVerifier 
/**
 * std() over an inverted range (start row after stop row) must fail, leaving null.
 * Timeout added for consistency: every other test in this class uses
 * {@code @Test(timeout=300000)}; this one was the lone exception.
 */
@Test(timeout = 300000)
public void testStdWithInvalidRange() {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addFamily(TEST_FAMILY);
  rangeScan.setStartRow(ROWS[6]);
  rangeScan.setStopRow(ROWS[1]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  Double result = null;
  try {
    result = client.std(TEST_TABLE, interpreter, rangeScan);
  } catch (Throwable expected) {
    // invalid range is supposed to throw; result stays null
  }
  assertEquals(null, result);
}

InternalCallVerifier EqualityVerifier 
/**
 * max() over a filter matching no rows must return null. The original seeded
 * {@code max} with a dead {@code 0.00d} initializer that was always overwritten;
 * initialize to null like the sibling testMinWithFilter.
 */
@Test(timeout = 300000)
public void testMaxWithFilter() throws Throwable {
  Double maximum = null;
  AggregationClient client = new AggregationClient(conf);
  Scan filteredScan = new Scan();
  filteredScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  Filter noMatch = new PrefixFilter(Bytes.toBytes("foo:bar"));
  filteredScan.setFilter(noMatch);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  maximum = client.max(TEST_TABLE, interpreter, filteredScan);
  assertEquals(null, maximum);
}

InternalCallVerifier EqualityVerifier 
/** Average of the full column over the whole table should be exactly 9.5. */
@Test(timeout = 300000)
public void testAvgWithValidRange() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan fullScan = new Scan();
  fullScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double average = client.avg(TEST_TABLE, interpreter, fullScan);
  assertEquals(9.5, average, 0);
}

InternalCallVerifier EqualityVerifier 
/** Sum over the single-row range [6, 7) without a qualifier should be 6.60. */
@Test(timeout = 300000)
public void testSumWithValidRange2WithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addFamily(TEST_FAMILY);
  rangeScan.setStartRow(ROWS[6]);
  rangeScan.setStopRow(ROWS[7]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double total = client.sum(TEST_TABLE, interpreter, rangeScan);
  assertEquals(6.60, total, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** avg() over an inverted range (start row after stop row) must fail, leaving null. */
@Test(timeout = 300000)
public void testAvgWithInvalidRange() {
  AggregationClient client = new AggregationClient(conf);
  Double result = null;
  Scan rangeScan = new Scan();
  rangeScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  rangeScan.setStartRow(ROWS[5]);
  rangeScan.setStopRow(ROWS[1]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  try {
    result = client.avg(TEST_TABLE, interpreter, rangeScan);
  } catch (Throwable expected) {
    // invalid range is supposed to throw; result stays null
  }
  assertEquals(null, result);
}

InternalCallVerifier EqualityVerifier 
/** Std-dev over the whole family (no qualifier) should be roughly 6.342. */
@Test(timeout = 300000)
public void testStdWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan familyScan = new Scan();
  familyScan.addFamily(TEST_FAMILY);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double std = client.std(TEST_TABLE, interpreter, familyScan);
  assertEquals(6.342, std, 0.05d);
}

InternalCallVerifier EqualityVerifier 
/** min() over an empty range (start row equals stop row) must fail, leaving null. */
@Test(timeout = 300000)
public void testMinWithInvalidRange2() {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addFamily(TEST_FAMILY);
  rangeScan.setStartRow(ROWS[6]);
  rangeScan.setStopRow(ROWS[6]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  Double result = null;
  try {
    result = client.min(TEST_TABLE, interpreter, rangeScan);
  } catch (Throwable expected) {
    // degenerate range is supposed to throw; result stays null
  }
  assertEquals(null, result);
}

InternalCallVerifier EqualityVerifier 
/** std() over a filter matching no rows returns NaN, not an exception. */
@Test(timeout = 300000)
public void testStdWithFilter() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Filter noMatch = new PrefixFilter(Bytes.toBytes("foo:bar"));
  Scan filteredScan = new Scan();
  filteredScan.addFamily(TEST_FAMILY);
  filteredScan.setFilter(noMatch);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  Double std = client.std(TEST_TABLE, interpreter, filteredScan);
  assertEquals(Double.NaN, std, 0);
}

InternalCallVerifier EqualityVerifier 
/** Std-dev of the full column over the whole table should be roughly 5.766. */
@Test(timeout = 300000)
public void testStdWithValidRange() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan fullScan = new Scan();
  fullScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double std = client.std(TEST_TABLE, interpreter, fullScan);
  assertEquals(5.766, std, 0.05d);
}

InternalCallVerifier EqualityVerifier 
/** sum() over an inverted range (start row after stop row) must fail, leaving null. */
@Test(timeout = 300000)
public void testSumWithInvalidRange() {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addFamily(TEST_FAMILY);
  rangeScan.setStartRow(ROWS[6]);
  rangeScan.setStopRow(ROWS[2]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  Double result = null;
  try {
    result = client.sum(TEST_TABLE, interpreter, rangeScan);
  } catch (Throwable expected) {
    // invalid range is supposed to throw; result stays null
  }
  assertEquals(null, result);
}

InternalCallVerifier EqualityVerifier 
/** min() over a filter matching no rows must return null. */
@Test(timeout = 300000)
public void testMinWithFilter() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan filteredScan = new Scan();
  filteredScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  Filter noMatch = new PrefixFilter(Bytes.toBytes("foo:bar"));
  filteredScan.setFilter(noMatch);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  Double minimum = client.min(TEST_TABLE, interpreter, filteredScan);
  assertEquals(null, minimum);
}

InternalCallVerifier EqualityVerifier 
/** Minimum over rows [5, 15) of one column should be exactly 5.00. */
@Test(timeout = 300000)
public void testMinWithValidRange2() throws Throwable {
  AggregationClient client = new AggregationClient(conf);
  Scan rangeScan = new Scan();
  rangeScan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  rangeScan.setStartRow(ROWS[5]);
  rangeScan.setStopRow(ROWS[15]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  double minimum = client.min(TEST_TABLE, interpreter, rangeScan);
  assertEquals(5.00, minimum, 0.00);
}

InternalCallVerifier EqualityVerifier 
/** std() with no column family on the scan must fail, leaving null. */
@Test(timeout = 300000)
public void testStdWithValidRangeWithNullCF() {
  AggregationClient client = new AggregationClient(conf);
  Double result = null;
  Scan bareScan = new Scan();
  bareScan.setStartRow(ROWS[6]);
  bareScan.setStopRow(ROWS[17]);
  final ColumnInterpreter interpreter = new DoubleColumnInterpreter();
  try {
    result = client.std(TEST_TABLE, interpreter, bareScan);
  } catch (Throwable expected) {
    // missing column family is supposed to throw; result stays null
  }
  assertEquals(null, result);
}

Class: org.apache.hadoop.hbase.coprocessor.TestHTableWrapper

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Loads DummyRegionObserver into the master coprocessor host, checks the
 * environment metadata, and exercises the wrapped HTable it hands out.
 */
@Test
public void testHTableInterfaceMethods() throws Exception {
  Configuration conf = util.getConfiguration();
  MasterCoprocessorHost cpHost =
      util.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
  Class observerClass = DummyRegionObserver.class;
  cpHost.load(observerClass, Coprocessor.PRIORITY_HIGHEST, conf);
  CoprocessorEnvironment env = cpHost.findCoprocessorEnvironment(observerClass.getName());
  assertEquals(Coprocessor.VERSION, env.getVersion());
  assertEquals(VersionInfo.getVersion(), env.getHBaseVersion());
  // The environment-provided table is a wrapper; exercise its interface methods.
  hTableInterface = env.getTable(TEST_TABLE);
  checkHTableInterfaceMethods();
  cpHost.shutdown(env);
}

Class: org.apache.hadoop.hbase.coprocessor.TestMasterCoprocessorExceptionWithAbort

IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Abort variant: a create-table request trips BuggyMasterObserver, and the master is
// expected to abort. Abortion is detected by watching ZK for deletion of the
// /hbase/master znode via MasterTracker, polled for up to 30 seconds. The table
// creation runs on a separate thread (CreateTableThread) so the watcher can observe
// the abort concurrently; the thread is interrupted and joined at the end.
@Test(timeout=30000) public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException { MiniHBaseCluster cluster=UTIL.getHBaseCluster(); HMaster master=cluster.getMaster(); MasterCoprocessorHost host=master.getMasterCoprocessorHost(); BuggyMasterObserver cp=(BuggyMasterObserver)host.findCoprocessor(BuggyMasterObserver.class.getName()); assertFalse("No table created yet",cp.wasCreateTableCalled()); ZooKeeperWatcher zkw=new ZooKeeperWatcher(UTIL.getConfiguration(),"unittest",new Abortable(){ @Override public void abort( String why, Throwable e){ throw new RuntimeException("Fatal ZK error: " + why,e); } @Override public boolean isAborted(){ return false; } } ); MasterTracker masterTracker=new MasterTracker(zkw,"/hbase/master",new Abortable(){ @Override public void abort( String why, Throwable e){ throw new RuntimeException("Fatal ZK master tracker error, why=",e); } @Override public boolean isAborted(){ return false; } } ); masterTracker.start(); zkw.registerListener(masterTracker); assertTrue(HMaster.getLoadedCoprocessors().contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName())); CreateTableThread createTableThread=new CreateTableThread(UTIL); createTableThread.start(); for (int i=0; i < 30; i++) { if (masterTracker.masterZKNodeWasDeleted == true) { break; } try { Thread.sleep(1000); } catch ( InterruptedException e) { fail("InterruptedException while waiting for master zk node to " + "be deleted."); } } assertTrue("Master aborted on coprocessor exception, as expected.",masterTracker.masterZKNodeWasDeleted); createTableThread.interrupt(); try { createTableThread.join(1000); } catch ( InterruptedException e) { assertTrue("Ignoring InterruptedException while waiting for " + " createTableThread.join().",true); } }

Class: org.apache.hadoop.hbase.coprocessor.TestMasterCoprocessorExceptionWithRemove

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Remove variant: the buggy observer makes the first create-table throw a
// DoNotRetryIOException to the client, but the master must SURVIVE (its ZK node is
// still present after a 3s grace sleep) and still report the coprocessor as loaded.
// A second table creation afterwards must succeed, proving the master stayed healthy.
@Test(timeout=30000) public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException { MiniHBaseCluster cluster=UTIL.getHBaseCluster(); HMaster master=cluster.getMaster(); MasterCoprocessorHost host=master.getMasterCoprocessorHost(); BuggyMasterObserver cp=(BuggyMasterObserver)host.findCoprocessor(BuggyMasterObserver.class.getName()); assertFalse("No table created yet",cp.wasCreateTableCalled()); ZooKeeperWatcher zkw=new ZooKeeperWatcher(UTIL.getConfiguration(),"unittest",new Abortable(){ @Override public void abort( String why, Throwable e){ throw new RuntimeException("Fatal ZK error: " + why,e); } @Override public boolean isAborted(){ return false; } } ); MasterTracker masterTracker=new MasterTracker(zkw,"/hbase/master",new Abortable(){ @Override public void abort( String why, Throwable e){ throw new RuntimeException("Fatal Zookeeper tracker error, why=",e); } @Override public boolean isAborted(){ return false; } } ); masterTracker.start(); zkw.registerListener(masterTracker); String coprocessorName=BuggyMasterObserver.class.getName(); assertTrue(HMaster.getLoadedCoprocessors().contains(coprocessorName)); HTableDescriptor htd1=new HTableDescriptor(TableName.valueOf(TEST_TABLE1)); htd1.addFamily(new HColumnDescriptor(TEST_FAMILY1)); boolean threwDNRE=false; try { Admin admin=UTIL.getHBaseAdmin(); admin.createTable(htd1); } catch ( IOException e) { if (e.getClass().getName().equals("org.apache.hadoop.hbase.DoNotRetryIOException")) { threwDNRE=true; } } finally { assertTrue(threwDNRE); } try { Thread.sleep(3000); } catch ( InterruptedException e) { fail("InterruptedException while sleeping."); } assertFalse("Master survived coprocessor NPE, as expected.",masterTracker.masterZKNodeWasDeleted); String loadedCoprocessors=HMaster.getLoadedCoprocessors(); assertTrue(loadedCoprocessors.contains(coprocessorName)); HTableDescriptor htd2=new HTableDescriptor(TableName.valueOf(TEST_TABLE2)); htd2.addFamily(new HColumnDescriptor(TEST_FAMILY2)); Admin 
admin=UTIL.getHBaseAdmin(); try { admin.createTable(htd2); } catch ( IOException e) { fail("Failed to create table after buggy coprocessor removal: " + e); } }

Class: org.apache.hadoop.hbase.coprocessor.TestMasterObserver

InternalCallVerifier BooleanVerifier 
@Test(timeout = 180000)
public void testListProceduresOperation() throws Exception {
  // Locate the observer installed on the active mini-cluster master.
  final HMaster activeMaster = UTIL.getHBaseCluster().getMaster();
  final CPMasterObserver observer = (CPMasterObserver) activeMaster
      .getMasterCoprocessorHost().findCoprocessor(CPMasterObserver.class.getName());
  observer.resetStates();
  // Trigger the list-procedures path and confirm the hook fired.
  activeMaster.listProcedures();
  assertTrue("Coprocessor should be called on list procedures request",
      observer.wasListProceduresCalled());
}

InternalCallVerifier BooleanVerifier 
@Test(timeout = 180000)
public void testAbortProcedureOperation() throws Exception {
  // Locate the observer installed on the active mini-cluster master.
  final HMaster activeMaster = UTIL.getHBaseCluster().getMaster();
  final CPMasterObserver observer = (CPMasterObserver) activeMaster
      .getMasterCoprocessorHost().findCoprocessor(CPMasterObserver.class.getName());
  observer.resetStates();
  // Abort an arbitrary procedure id; we only care that the hook fires.
  activeMaster.abortProcedure(1, true);
  assertTrue("Coprocessor should be called on abort procedure request",
      observer.wasAbortProcedureCalled());
}

InternalCallVerifier BooleanVerifier 
@Test(timeout = 180000)
public void testSnapshotOperations() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  CPMasterObserver cp = (CPMasterObserver) host.findCoprocessor(CPMasterObserver.class.getName());
  cp.resetStates();

  // Create a test table, then disable it so it can be snapshotted and restored.
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
  Admin admin = UTIL.getHBaseAdmin();
  tableCreationLatch = new CountDownLatch(1);
  admin.createTable(htd);
  tableCreationLatch.await();
  tableCreationLatch = new CountDownLatch(1);
  admin.disableTable(tableName);
  assertTrue(admin.isTableDisabled(tableName));

  try {
    // snapshot + list
    assertFalse("Coprocessor should not have been called yet", cp.wasSnapshotCalled());
    admin.snapshot(TEST_SNAPSHOT, tableName);
    assertTrue("Coprocessor should have been called on snapshot", cp.wasSnapshotCalled());
    admin.listSnapshots();
    assertTrue("Coprocessor should have been called on snapshot list",
        cp.wasListSnapshotCalled());

    // clone: clone hook fires, restore hook must not
    admin.cloneSnapshot(TEST_SNAPSHOT, TEST_CLONE);
    assertTrue("Coprocessor should have been called on snapshot clone",
        cp.wasCloneSnapshotCalled());
    assertFalse("Coprocessor restore should not have been called on snapshot clone",
        cp.wasRestoreSnapshotCalled());
    admin.disableTable(TEST_CLONE);
    // BUGFIX: verify the CLONE is disabled; previously this re-checked tableName.
    assertTrue(admin.isTableDisabled(TEST_CLONE));
    deleteTable(admin, TEST_CLONE);

    // restore: restore hook fires, clone hook must not
    cp.resetStates();
    admin.restoreSnapshot(TEST_SNAPSHOT);
    assertTrue("Coprocessor should have been called on snapshot restore",
        cp.wasRestoreSnapshotCalled());
    assertFalse("Coprocessor clone should not have been called on snapshot restore",
        cp.wasCloneSnapshotCalled());

    admin.deleteSnapshot(TEST_SNAPSHOT);
    assertTrue("Coprocessor should have been called on snapshot delete",
        cp.wasDeleteSnapshotCalled());
  } finally {
    deleteTable(admin, tableName);
  }
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test(timeout = 180000)
public void testRegionTransitionOperations() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  CPMasterObserver cp = (CPMasterObserver) host.findCoprocessor(CPMasterObserver.class.getName());
  cp.enableBypass(false);
  cp.resetStates();

  Table table = UTIL.createMultiRegionTable(tableName, TEST_FAMILY);
  try (RegionLocator r = UTIL.getConnection().getRegionLocator(tableName)) {
    UTIL.waitUntilAllRegionsAssigned(tableName);

    // Pick any region that currently has an assigned server.
    List<HRegionLocation> regions = r.getAllRegionLocations();
    HRegionLocation firstGoodPair = null;
    for (HRegionLocation e : regions) {
      if (e.getServerName() != null) {
        firstGoodPair = e;
        break;
      }
    }
    // Message fixed: assertion messages describe the FAILURE, not the success.
    assertNotNull("Should find at least one region with a non-null server", firstGoodPair);
    LOG.info("Found " + firstGoodPair.toString());

    // Find a destination server that is neither the region's current host
    // nor the master itself.
    Collection<ServerName> servers = master.getClusterStatus().getServers();
    String destName = null;
    String serverNameForFirstRegion = firstGoodPair.getServerName().toString();
    LOG.info("serverNameForFirstRegion=" + serverNameForFirstRegion);
    ServerName masterServerName = master.getServerName();
    boolean found = false;
    for (ServerName info : servers) {
      LOG.info("ServerName=" + info);
      if (!serverNameForFirstRegion.equals(info.getServerName())
          && !masterServerName.equals(info)) {
        destName = info.toString();
        found = true;
        break;
      }
    }
    assertTrue("Should find a destination server distinct from source and master", found);
    LOG.info("Found " + destName);

    // Move the region and verify the move hook fired.
    master.getMasterRpcServices().moveRegion(null, RequestConverter.buildMoveRegionRequest(
        firstGoodPair.getRegionInfo().getEncodedNameAsBytes(), Bytes.toBytes(destName)));
    assertTrue("Coprocessor should have been called on region move", cp.wasMoveCalled());

    master.balanceSwitch(true);
    assertTrue("Coprocessor should have been called on balance switch",
        cp.wasBalanceSwitchCalled());
    master.balanceSwitch(false);

    // Wait for any in-flight transitions to settle before skewing assignments.
    AssignmentManager mgr = master.getAssignmentManager();
    Collection<RegionState> transRegions =
        mgr.getRegionStates().getRegionsInTransition().values();
    for (RegionState state : transRegions) {
      mgr.getRegionStates().waitOnRegionToClearRegionsInTransition(state.getRegion());
    }

    // Pile half of RS0's regions onto RS1 so the balancer has work to do.
    HRegionServer rs = cluster.getRegionServer(0);
    byte[] destRS = Bytes.toBytes(cluster.getRegionServer(1).getServerName().toString());
    waitForRITtoBeZero(master);
    List<HRegionInfo> openRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
    int moveCnt = openRegions.size() / 2;
    for (int i = 0; i < moveCnt; i++) {
      HRegionInfo info = openRegions.get(i);
      if (!info.isMetaTable()) {
        master.getMasterRpcServices().moveRegion(null,
            RequestConverter.buildMoveRegionRequest(
                openRegions.get(i).getEncodedNameAsBytes(), destRS));
      }
    }
    waitForRITtoBeZero(master);

    master.balanceSwitch(true);
    // Return value intentionally ignored; we only assert the hook fired.
    master.balance();
    assertTrue("Coprocessor should be called on region rebalancing", cp.wasBalanceCalled());
  } finally {
    Admin admin = UTIL.getHBaseAdmin();
    admin.disableTable(tableName);
    deleteTable(admin, tableName);
  }
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// End-to-end walk through the table-lifecycle master hooks
// (create/merge/disable/enable/modify/add-column/modify-column/truncate/delete),
// first with coprocessor bypass enabled, then with bypass disabled.
@Test(timeout=180000) public void testTableOperations() throws Exception {
  MiniHBaseCluster cluster=UTIL.getHBaseCluster();
  final TableName tableName=TableName.valueOf(name.getMethodName());
  HMaster master=cluster.getMaster();
  MasterCoprocessorHost host=master.getMasterCoprocessorHost();
  CPMasterObserver cp=(CPMasterObserver)host.findCoprocessor(CPMasterObserver.class.getName());
  // Phase 1: bypass ON — pre* hooks run but the post/handler side is skipped
  // for the operations the observer bypasses.
  cp.enableBypass(true);
  cp.resetStates();
  assertFalse("No table created yet",cp.wasCreateTableCalled());
  HTableDescriptor htd=new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
  try (Connection connection=ConnectionFactory.createConnection(UTIL.getConfiguration());Admin admin=connection.getAdmin()){
    // Pre-split table; the latch is counted down by the observer's post-create hook.
    tableCreationLatch=new CountDownLatch(1);
    admin.createTable(htd,Arrays.copyOfRange(HBaseTestingUtility.KEYS,1,HBaseTestingUtility.KEYS.length));
    assertTrue("Test table should be created",cp.wasCreateTableCalled());
    tableCreationLatch.await();
    assertTrue("Table pre create handler called.",cp.wasPreCreateTableHandlerCalled());
    assertTrue("Table create handler should be called.",cp.wasCreateTableHandlerCalled());
    // Merge the first two regions and check the dispatch-merge hook.
    RegionLocator regionLocator=connection.getRegionLocator(htd.getTableName());
    List regions=regionLocator.getAllRegionLocations();
    admin.mergeRegions(regions.get(0).getRegionInfo().getEncodedNameAsBytes(),regions.get(1).getRegionInfo().getEncodedNameAsBytes(),true);
    assertTrue("Coprocessor should have been called on region merge",cp.wasDispatchMergeCalled());
    tableCreationLatch=new CountDownLatch(1);
    // Disable / enable round-trip.
    admin.disableTable(tableName);
    assertTrue(admin.isTableDisabled(tableName));
    assertTrue("Coprocessor should have been called on table disable",cp.wasDisableTableCalled());
    assertTrue("Disable table handler should be called.",cp.wasDisableTableHandlerCalled());
    assertFalse(cp.wasEnableTableCalled());
    admin.enableTable(tableName);
    assertTrue(admin.isTableEnabled(tableName));
    assertTrue("Coprocessor should have been called on table enable",cp.wasEnableTableCalled());
    assertTrue("Enable table handler should be called.",cp.wasEnableTableHandlerCalled());
    admin.disableTable(tableName);
    assertTrue(admin.isTableDisabled(tableName));
    // Schema changes while bypassed: only the pre* hooks should record.
    htd.setMaxFileSize(512 * 1024 * 1024);
    modifyTableSync(admin,tableName,htd);
    assertTrue("Test table should have been modified",cp.wasModifyTableCalled());
    admin.addColumnFamily(tableName,new HColumnDescriptor(TEST_FAMILY2));
    assertTrue("New column family shouldn't have been added to test table",cp.preAddColumnCalledOnly());
    HColumnDescriptor hcd1=new HColumnDescriptor(TEST_FAMILY2);
    hcd1.setMaxVersions(25);
    admin.modifyColumnFamily(tableName,hcd1);
    assertTrue("Second column family should be modified",cp.preModifyColumnCalledOnly());
    admin.truncateTable(tableName,false);
    admin.disableTable(tableName);
    assertTrue(admin.isTableDisabled(tableName));
    deleteTable(admin,tableName);
    assertFalse("Test table should have been deleted",admin.tableExists(tableName));
    assertTrue("Coprocessor should have been called on table delete",cp.wasDeleteTableCalled());
    assertTrue("Delete table handler should be called.",cp.wasDeleteTableHandlerCalled());
    // Phase 2: bypass OFF — both pre* and post*/handler hooks should record.
    cp.enableBypass(false);
    cp.resetStates();
    admin.createTable(htd);
    assertTrue("Test table should be created",cp.wasCreateTableCalled());
    tableCreationLatch.await();
    assertTrue("Table pre create handler called.",cp.wasPreCreateTableHandlerCalled());
    assertTrue("Table create handler should be called.",cp.wasCreateTableHandlerCalled());
    assertFalse(cp.wasDisableTableCalled());
    assertFalse(cp.wasDisableTableHandlerCalled());
    admin.disableTable(tableName);
    assertTrue(admin.isTableDisabled(tableName));
    assertTrue("Coprocessor should have been called on table disable",cp.wasDisableTableCalled());
    assertTrue("Disable table handler should be called.",cp.wasDisableTableHandlerCalled());
    htd.setMaxFileSize(512 * 1024 * 1024);
    modifyTableSync(admin,tableName,htd);
    assertTrue("Test table should have been modified",cp.wasModifyTableCalled());
    admin.addColumnFamily(tableName,new HColumnDescriptor(TEST_FAMILY2));
    assertTrue("New column family should have been added to test table",cp.wasAddColumnCalled());
    assertTrue("Add column handler should be called.",cp.wasAddColumnHandlerCalled());
    HColumnDescriptor hcd=new HColumnDescriptor(TEST_FAMILY2);
    hcd.setMaxVersions(25);
    admin.modifyColumnFamily(tableName,hcd);
    assertTrue("Second column family should be modified",cp.wasModifyColumnCalled());
    assertTrue("Modify table handler should be called.",cp.wasModifyColumnHandlerCalled());
    assertFalse(cp.wasEnableTableCalled());
    assertFalse(cp.wasEnableTableHandlerCalled());
    admin.enableTable(tableName);
    assertTrue(admin.isTableEnabled(tableName));
    assertTrue("Coprocessor should have been called on table enable",cp.wasEnableTableCalled());
    assertTrue("Enable table handler should be called.",cp.wasEnableTableHandlerCalled());
    admin.disableTable(tableName);
    assertTrue(admin.isTableDisabled(tableName));
    // Column-family delete: verify both hooks and the actual schema change.
    assertFalse("No column family deleted yet",cp.wasDeleteColumnCalled());
    assertFalse("Delete table column handler should not be called.",cp.wasDeleteColumnHandlerCalled());
    admin.deleteColumnFamily(tableName,TEST_FAMILY2);
    HTableDescriptor tableDesc=admin.getTableDescriptor(tableName);
    assertNull("'" + Bytes.toString(TEST_FAMILY2) + "' should have been removed",tableDesc.getFamily(TEST_FAMILY2));
    assertTrue("Coprocessor should have been called on column delete",cp.wasDeleteColumnCalled());
    assertTrue("Delete table column handler should be called.",cp.wasDeleteColumnHandlerCalled());
    // Final delete: both hooks fire and the table is gone.
    assertFalse("No table deleted yet",cp.wasDeleteTableCalled());
    assertFalse("Delete table handler should not be called.",cp.wasDeleteTableHandlerCalled());
    deleteTable(admin,tableName);
    assertFalse("Test table should have been deleted",admin.tableExists(tableName));
    assertTrue("Coprocessor should have been called on table delete",cp.wasDeleteTableCalled());
    assertTrue("Delete table handler should be called.",cp.wasDeleteTableHandlerCalled());
  }
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Exercises the master-observer namespace hooks
// (create/modify/delete/get/list), alternating coprocessor bypass on and off.
// With bypass ON the operation is expected to fail with
// BypassCoprocessorException and only the pre* hook records.
@Test(timeout=180000) public void testNamespaceOperations() throws Exception {
  MiniHBaseCluster cluster=UTIL.getHBaseCluster();
  String testNamespace="observed_ns";
  HMaster master=cluster.getMaster();
  MasterCoprocessorHost host=master.getMasterCoprocessorHost();
  CPMasterObserver cp=(CPMasterObserver)host.findCoprocessor(CPMasterObserver.class.getName());
  // Bypass OFF: create goes through and both hooks record.
  cp.enableBypass(false);
  cp.resetStates();
  Admin admin=UTIL.getHBaseAdmin();
  admin.createNamespace(NamespaceDescriptor.create(testNamespace).build());
  assertTrue("Test namespace should be created",cp.wasCreateNamespaceCalled());
  assertNotNull(admin.getNamespaceDescriptor(testNamespace));
  assertTrue("Test namespace descriptor should have been called",cp.wasGetNamespaceDescriptorCalled());
  // Bypass ON: modify is rejected with BypassCoprocessorException.
  cp.enableBypass(true);
  cp.resetStates();
  boolean expected=false;
  try {
    admin.modifyNamespace(NamespaceDescriptor.create(testNamespace).build());
  } catch ( BypassCoprocessorException ce) {
    expected=true;
  }
  assertTrue(expected);
  assertTrue("Test namespace should not have been modified",cp.preModifyNamespaceCalledOnly());
  assertNotNull(admin.getNamespaceDescriptor(testNamespace));
  assertTrue("Test namespace descriptor should have been called",cp.wasGetNamespaceDescriptorCalled());
  // Bypass ON: delete is rejected as well; the namespace survives.
  expected=false;
  try {
    admin.deleteNamespace(testNamespace);
  } catch ( BypassCoprocessorException ce) {
    expected=true;
  }
  assertTrue(expected);
  assertTrue("Test namespace should not have been deleted",cp.preDeleteNamespaceCalledOnly());
  assertNotNull(admin.getNamespaceDescriptor(testNamespace));
  assertTrue("Test namespace descriptor should have been called",cp.wasGetNamespaceDescriptorCalled());
  // Bypass OFF: modify and delete now succeed and record fully.
  cp.enableBypass(false);
  cp.resetStates();
  admin.modifyNamespace(NamespaceDescriptor.create(testNamespace).build());
  assertTrue("Test namespace should have been modified",cp.wasModifyNamespaceCalled());
  admin.deleteNamespace(testNamespace);
  assertTrue("Test namespace should have been deleted",cp.wasDeleteNamespaceCalled());
  // Bypass ON: re-creation is rejected.
  cp.enableBypass(true);
  cp.resetStates();
  expected=false;
  try {
    admin.createNamespace(NamespaceDescriptor.create(testNamespace).build());
  } catch ( BypassCoprocessorException ce) {
    expected=true;
  }
  assertTrue(expected);
  assertTrue("Test namespace should not be created",cp.preCreateNamespaceCalledOnly());
  // List: bypassed list runs the pre hook only; unbypassed list runs both.
  cp.enableBypass(true);
  cp.resetStates();
  admin.listNamespaceDescriptors();
  assertTrue("post listNamespace should not have been called",cp.preListNamespaceDescriptorsCalledOnly());
  cp.enableBypass(false);
  cp.resetStates();
  admin.listNamespaceDescriptors();
  assertTrue("post listNamespace should have been called",cp.wasListNamespaceDescriptorsCalled());
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test(timeout = 180000)
public void testStarted() throws Exception {
  // All master lifecycle hooks must have fired by the time the cluster is up.
  HMaster activeMaster = UTIL.getHBaseCluster().getMaster();
  assertTrue("Master should be active", activeMaster.isActiveMaster());

  MasterCoprocessorHost cpHost = activeMaster.getMasterCoprocessorHost();
  assertNotNull("CoprocessorHost should not be null", cpHost);

  CPMasterObserver observer =
      (CPMasterObserver) cpHost.findCoprocessor(CPMasterObserver.class.getName());
  assertNotNull("CPMasterObserver coprocessor not found or not installed!", observer);
  assertTrue("MasterObserver should have been started", observer.wasStarted());
  assertTrue("preMasterInitialization() hook should have been called",
      observer.wasMasterInitializationCalled());
  assertTrue("postStartMaster() hook should have been called",
      observer.wasStartMasterCalled());
}

InternalCallVerifier BooleanVerifier 
@Test(timeout = 180000)
public void testTableDescriptorsEnumeration() throws Exception {
  final HMaster activeMaster = UTIL.getHBaseCluster().getMaster();
  final CPMasterObserver observer = (CPMasterObserver) activeMaster
      .getMasterCoprocessorHost().findCoprocessor(CPMasterObserver.class.getName());
  observer.resetStates();
  // A null table-name list requests every descriptor.
  GetTableDescriptorsRequest request =
      RequestConverter.buildGetTableDescriptorsRequest((List) null);
  activeMaster.getMasterRpcServices().getTableDescriptors(null, request);
  assertTrue("Coprocessor should be called on table descriptors request",
      observer.wasGetTableDescriptorsCalled());
}

InternalCallVerifier BooleanVerifier 
@Test(timeout = 180000)
public void testTableNamesEnumeration() throws Exception {
  final HMaster activeMaster = UTIL.getHBaseCluster().getMaster();
  final CPMasterObserver observer = (CPMasterObserver) activeMaster
      .getMasterCoprocessorHost().findCoprocessor(CPMasterObserver.class.getName());
  observer.resetStates();
  // Issue a bare table-names RPC and confirm the hook fired.
  activeMaster.getMasterRpcServices()
      .getTableNames(null, GetTableNamesRequest.newBuilder().build());
  assertTrue("Coprocessor should be called on table names request",
      observer.wasGetTableNamesCalled());
}

Class: org.apache.hadoop.hbase.coprocessor.TestRegionObserverInterface

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Tests overriding compaction handling via coprocessor hooks: the
 * EvenOnlyCompactor is expected to drop odd-keyed rows during compaction.
 * @throws Exception
 */
@Test(timeout = 300000)
public void testCompactionOverride() throws Exception {
  TableName compactTable = TableName.valueOf("TestCompactionOverride");
  Admin admin = util.getHBaseAdmin();
  if (admin.tableExists(compactTable)) {
    admin.disableTable(compactTable);
    admin.deleteTable(compactTable);
  }

  HTableDescriptor htd = new HTableDescriptor(compactTable);
  htd.addFamily(new HColumnDescriptor(A));
  htd.addCoprocessor(EvenOnlyCompactor.class.getName());
  admin.createTable(htd);

  // Write rows keyed 1..10.
  Table table = util.getConnection().getTable(compactTable);
  for (long i = 1; i <= 10; i++) {
    byte[] iBytes = Bytes.toBytes(i);
    Put put = new Put(iBytes);
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(A, A, iBytes);
    table.put(put);
  }

  HRegion firstRegion = cluster.getRegions(compactTable).get(0);
  Coprocessor cp =
      firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName());
  assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
  EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;

  // Force a flush and wait (up to 10s) for the observer to record it.
  long ts = System.currentTimeMillis();
  admin.flush(compactTable);
  for (int i = 0; i < 10; i++) {
    if (compactor.lastFlush >= ts) {
      break;
    }
    Thread.sleep(1000);
  }
  assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
  LOG.debug("Flush complete");

  // Force a major compaction and wait (up to 30s) for it to be observed.
  ts = compactor.lastFlush;
  admin.majorCompact(compactTable);
  for (int i = 0; i < 30; i++) {
    if (compactor.lastCompaction >= ts) {
      break;
    }
    Thread.sleep(1000);
  }
  LOG.debug("Last compaction was at " + compactor.lastCompaction);
  assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);

  // Only even-keyed rows should survive the coprocessor-filtered compaction.
  ResultScanner scanner = table.getScanner(new Scan());
  try {
    for (long i = 2; i <= 10; i += 2) {
      Result r = scanner.next();
      assertNotNull(r);
      assertFalse(r.isEmpty());
      byte[] iBytes = Bytes.toBytes(i);
      // BUGFIX: assertArrayEquals takes (message, expected, actual); the
      // original passed the actual value as the expected one.
      assertArrayEquals("Row should be " + i, iBytes, r.getRow());
      assertArrayEquals("Value should be " + i, iBytes, r.getValue(A, A));
    }
  } finally {
    scanner.close();
  }
  table.close();
}

Class: org.apache.hadoop.hbase.coprocessor.TestRegionObserverScannerOpenHook

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Unfortunately, the easiest way to test this is to spin up a mini-cluster since we want to do * the usual compaction mechanism on the region, rather than going through the backdoor to the * region */ @Test public void testRegionObserverCompactionTimeStacking() throws Exception { Configuration conf=UTIL.getConfiguration(); conf.setClass(HConstants.REGION_IMPL,CompactionCompletionNotifyingRegion.class,HRegion.class); conf.setInt("hbase.hstore.compaction.min",2); UTIL.startMiniCluster(); String tableName="testRegionObserverCompactionTimeStacking"; byte[] ROW=Bytes.toBytes("testRow"); byte[] A=Bytes.toBytes("A"); HTableDescriptor desc=new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(A)); desc.addCoprocessor(EmptyRegionObsever.class.getName(),null,Coprocessor.PRIORITY_USER,null); desc.addCoprocessor(NoDataFromCompaction.class.getName(),null,Coprocessor.PRIORITY_HIGHEST,null); Admin admin=UTIL.getHBaseAdmin(); admin.createTable(desc); Table table=UTIL.getConnection().getTable(desc.getTableName()); Put put=new Put(ROW); put.addColumn(A,A,A); table.put(put); HRegionServer rs=UTIL.getRSForFirstRegionInTable(desc.getTableName()); List regions=rs.getOnlineRegions(desc.getTableName()); assertEquals("More than 1 region serving test table with 1 row",1,regions.size()); Region region=regions.get(0); admin.flushRegion(region.getRegionInfo().getRegionName()); CountDownLatch latch=((CompactionCompletionNotifyingRegion)region).getCompactionStateChangeLatch(); put=new Put(Bytes.toBytes("anotherrow")); put.addColumn(A,A,A); table.put(put); admin.flushRegion(region.getRegionInfo().getRegionName()); latch.await(); Get get=new Get(ROW); Result r=table.get(get); assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. 
Found: " + r,r.listCells()); get=new Get(Bytes.toBytes("anotherrow")); r=table.get(get); assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor Found: " + r,r.listCells()); table.close(); UTIL.shutdownMiniCluster(); }

APIUtilityVerifier InternalCallVerifier NullVerifier 
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] row = Bytes.toBytes("testRow");
  byte[] fam = Bytes.toBytes("A");
  byte[][] families = new byte[][] { fam };
  Configuration conf = HBaseConfiguration.create();
  Region region =
      initHRegion(Bytes.toBytes(getClass().getName()), getClass().getName(), conf, families);
  // Stack a highest-priority observer that swallows flush output under a
  // plain user-priority observer.
  RegionCoprocessorHost cpHost = region.getCoprocessorHost();
  cpHost.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  cpHost.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  Put put = new Put(row);
  put.addColumn(fam, fam, fam);
  region.put(put);
  region.flush(true);

  // After the flush, reads must see nothing.
  Result result = region.get(new Get(row));
  assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
      + result, result.listCells());
  HBaseTestingUtility.closeRegionAndWAL(region);
}

APIUtilityVerifier InternalCallVerifier NullVerifier 
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  byte[] row = Bytes.toBytes("testRow");
  byte[] fam = Bytes.toBytes("A");
  byte[][] families = new byte[][] { fam };
  Configuration conf = HBaseConfiguration.create();
  Region region =
      initHRegion(Bytes.toBytes(getClass().getName()), getClass().getName(), conf, families);
  // Stack a highest-priority observer that suppresses scan results under a
  // plain user-priority observer.
  RegionCoprocessorHost cpHost = region.getCoprocessorHost();
  cpHost.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  cpHost.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  Put put = new Put(row);
  put.addColumn(fam, fam, fam);
  region.put(put);

  // The get goes through the scan path, so it must return no cells.
  Result result = region.get(new Get(row));
  assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
      + result, result.listCells());
  HBaseTestingUtility.closeRegionAndWAL(region);
}

Class: org.apache.hadoop.hbase.coprocessor.TestRegionServerCoprocessorEndpoint

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testEndpointExceptions() throws Exception {
  final ServerName rsName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName();
  final ServerRpcController rpcController = new ServerRpcController();
  final BlockingRpcCallback callback = new BlockingRpcCallback();
  DummyRegionServerEndpointProtos.DummyService stub = ProtobufUtil.newServiceStub(
      DummyRegionServerEndpointProtos.DummyService.class,
      TEST_UTIL.getHBaseAdmin().coprocessorService(rsName));
  // The endpoint deliberately throws; the controller should capture the failure.
  stub.dummyThrow(rpcController,
      DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), callback);
  assertEquals(null, callback.get());
  assertTrue(rpcController.failedOnException());
  // The remote exception's class name must match what the endpoint threw.
  assertEquals(WHAT_TO_THROW.getClass().getName().trim(),
      ((RemoteWithExtrasException) rpcController.getFailedOn().getCause()).getClassName().trim());
}

Class: org.apache.hadoop.hbase.coprocessor.TestRegionServerCoprocessorExceptionWithAbort

IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// With ABORT_ON_ERROR enabled, an exception thrown by a region observer during
// a put must cause the hosting region server to abort.
@Test(timeout=60000) public void testExceptionFromCoprocessorDuringPut() throws Exception {
  Configuration conf=TEST_UTIL.getConfiguration();
  // Keep client retries low so the failing put returns quickly.
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,2);
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,BuggyRegionObserver.class.getName());
  conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY,true);
  TEST_UTIL.startMiniCluster(2);
  try {
    final byte[] TEST_FAMILY=Bytes.toBytes("aaa");
    Table table=TEST_UTIL.createMultiRegionTable(TABLE_NAME,TEST_FAMILY);
    TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
    final HRegionServer regionServer=TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
    try {
      final byte[] ROW=Bytes.toBytes("aaa");
      Put put=new Put(ROW);
      put.addColumn(TEST_FAMILY,ROW,ROW);
      table.put(put);
    }
 catch (    IOException e) {
      // Intentionally ignored: the put is expected to fail because the buggy
      // observer throws; the abort below is what this test actually verifies.
    }
    // Poll up to ~10s for the region server to notice the error and abort.
    boolean aborted=false;
    for (int i=0; i < 10; i++) {
      aborted=regionServer.isAborted();
      if (aborted) {
        break;
      }
      try {
        Thread.sleep(1000);
      }
 catch (      InterruptedException e) {
        fail("InterruptedException while waiting for regionserver " + "zk node to be deleted.");
      }
    }
    Assert.assertTrue("The region server should have aborted",aborted);
    table.close();
  }
  finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}

Class: org.apache.hadoop.hbase.coprocessor.TestRegionServerCoprocessorExceptionWithRemove

IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test(timeout = 60000)
public void testExceptionFromCoprocessorDuringPut() throws IOException, InterruptedException {
  TableName TEST_TABLE = TableName.valueOf("observed_table");
  byte[] TEST_FAMILY = Bytes.toBytes("aaa");
  Table table = TEST_UTIL.createMultiRegionTable(TEST_TABLE, TEST_FAMILY);
  TEST_UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
  HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(TEST_TABLE);

  // The buggy coprocessor should surface an IOException to the client.
  boolean threwIOE = false;
  try {
    final byte[] ROW = Bytes.toBytes("aaa");
    Put put = new Put(ROW);
    put.addColumn(TEST_FAMILY, ROW, ROW);
    table.put(put);
    table.put(put);
  } catch (IOException e) {
    threwIOE = true;
  }
  // BUGFIX: assert AFTER the try/catch, not inside finally, so that an
  // unexpected exception from the puts is not masked by the assertion failure.
  assertTrue("The regionserver should have thrown an exception", threwIOE);

  // The test asserts the region server survives the coprocessor exception:
  // keep checking it has not aborted for ~10 seconds.
  for (int i = 0; i < 10; i++) {
    assertFalse(regionServer.isAborted());
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore interrupt status before failing
      fail("InterruptedException while waiting for regionserver "
          + "zk node to be deleted.");
    }
  }
  table.close();
}

Class: org.apache.hadoop.hbase.coprocessor.TestRegionServerObserver

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test verifies the hooks in regions merge.
 * @throws Exception
 */
@Test
public void testCoprocessorHooksInRegionsMerge() throws Exception {
  final int NUM_MASTERS = 1;
  final int NUM_RS = 1;
  final String TABLENAME = "testRegionServerObserver";
  final String TABLENAME2 = "testRegionServerObserver_2";
  final byte[] FAM = Bytes.toBytes("fam");

  Configuration conf = HBaseConfiguration.create();
  conf.setClass("hbase.coprocessor.regionserver.classes", CPRegionServerObserver.class,
      RegionServerObserver.class);
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  Admin admin = TEST_UTIL.getHBaseAdmin();
  try {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HRegionServer regionServer = cluster.getRegionServer(0);
    RegionServerCoprocessorHost cpHost = regionServer.getRegionServerCoprocessorHost();
    Coprocessor coprocessor = cpHost.findCoprocessor(CPRegionServerObserver.class.getName());
    CPRegionServerObserver regionServerObserver = (CPRegionServerObserver) coprocessor;

    // Two tables, each pre-split into two regions at "row".
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLENAME));
    desc.addFamily(new HColumnDescriptor(FAM));
    admin.createTable(desc, new byte[][] { Bytes.toBytes("row") });
    desc = new HTableDescriptor(TableName.valueOf(TABLENAME2));
    desc.addFamily(new HColumnDescriptor(FAM));
    admin.createTable(desc, new byte[][] { Bytes.toBytes("row") });

    assertFalse(regionServerObserver.wasRegionMergeCalled());
    List<Region> regions = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME));
    admin.mergeRegions(regions.get(0).getRegionInfo().getEncodedNameAsBytes(),
        regions.get(1).getRegionInfo().getEncodedNameAsBytes(), true);

    // Poll until the merge completes (bounded only by the harness timeout).
    int regionsCount = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)).size();
    while (regionsCount != 1) {
      regionsCount = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)).size();
      Thread.sleep(1000);
    }

    assertTrue(regionServerObserver.wasRegionMergeCalled());
    assertTrue(regionServerObserver.wasPreMergeCommit());
    assertTrue(regionServerObserver.wasPostMergeCommit());
    // BUGFIX: assertEquals takes (expected, actual); arguments were swapped.
    assertEquals(1, regionsCount);
    // The second table was never merged and keeps its two regions... its
    // first region count is checked against 1 for the un-merged server view.
    assertEquals(1, regionServer.getOnlineRegions(TableName.valueOf(TABLENAME2)).size());
  } finally {
    if (admin != null) admin.close();
    TEST_UTIL.shutdownMiniCluster();
  }
}

Class: org.apache.hadoop.hbase.coprocessor.TestRowProcessorEndpoint

InternalCallVerifier EqualityVerifier 
@Test
public void testMultipleRows() throws Throwable {
  prepareTestData();
  failures.set(0);
  final int threadCount = 100;
  // Hammer the swap-rows endpoint from many threads at once.
  concurrentExec(new SwapRowsRunner(), threadCount);
  LOG.debug("row keyvalues:" + stringifyKvs(table.get(new Get(ROW)).listCells()));
  LOG.debug("row2 keyvalues:" + stringifyKvs(table.get(new Get(ROW2)).listCells()));
  // However the concurrent swaps interleave, each row keeps its cell count.
  assertEquals(rowSize, table.get(new Get(ROW)).listCells().size());
  assertEquals(row2Size, table.get(new Get(ROW2)).listCells().size());
  assertEquals(0, failures.get());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testReadModifyWrite() throws Throwable {
  prepareTestData();
  failures.set(0);
  final int threadCount = 100;
  // Run concurrent read-modify-write increments against the same counter.
  concurrentExec(new IncrementRunner(), threadCount);
  LOG.debug("row keyvalues:" + stringifyKvs(table.get(new Get(ROW)).listCells()));
  // incrementCounter itself bumps the counter once more, hence the +1.
  int finalCounter = incrementCounter(table);
  assertEquals(threadCount + 1, finalCounter);
  assertEquals(0, failures.get());
}

Class: org.apache.hadoop.hbase.coprocessor.TestWALObserver

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Coprocessors shouldn't get notice of empty waledits.
 */
@Test
public void testEmptyWALEditAreNotSeen() throws Exception {
  final HRegionInfo regionInfo = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE));
  final HTableDescriptor tableDesc = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE));
  final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  WAL wal = wals.getWAL(UNSPECIFIED_REGION, null);
  try {
    SampleRegionWALObserver observer = getCoprocessor(wal, SampleRegionWALObserver.class);
    observer.setTestValues(TEST_TABLE, null, null, null, null, null, null, null);
    assertFalse(observer.isPreWALWriteCalled());
    assertFalse(observer.isPostWALWriteCalled());

    // Append a WALEdit containing no cells; neither hook should fire for it.
    final long now = EnvironmentEdgeManager.currentTime();
    long txid = wal.append(tableDesc, regionInfo,
        new WALKey(regionInfo.getEncodedNameAsBytes(), regionInfo.getTable(), now, mvcc),
        new WALEdit(), true);
    wal.sync(txid);

    assertFalse("Empty WALEdit should skip coprocessor evaluation.",
        observer.isPreWALWriteCalled());
    assertFalse("Empty WALEdit should skip coprocessor evaluation.",
        observer.isPostWALWriteCalled());
  } finally {
    wal.close();
  }
}

InternalCallVerifier NullVerifier 
/**
 * Test to see CP loaded successfully or not. There is a duplication at
 * TestHLog, but the purpose of that one is to see whether the loaded CP will
 * impact existing WAL tests or not.
 */
@Test
public void testWALObserverLoaded() throws Exception {
  final WAL wal = wals.getWAL(UNSPECIFIED_REGION, null);
  assertNotNull(getCoprocessor(wal, SampleRegionWALObserver.class));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Writes WAL entries first with non-legacy WALKey instances (only the new-API observer must see
// them; the legacy observer and all deprecated hooks stay silent), then resets observer state and
// appends one edit with a legacy HLogKey, after which BOTH observers must see the write and the
// legacy observer must additionally see the deprecated pre/post hooks.
// Fix: corrected log-message typo "reseting" -> "resetting"; code otherwise unchanged.
@Test public void testNonLegacyWALKeysDoNotExplode() throws Exception { TableName tableName=TableName.valueOf(TEST_TABLE); final HTableDescriptor htd=createBasic3FamilyHTD(Bytes.toString(TEST_TABLE)); final HRegionInfo hri=new HRegionInfo(tableName,null,null); MultiVersionConcurrencyControl mvcc=new MultiVersionConcurrencyControl(); fs.mkdirs(new Path(FSUtils.getTableDir(hbaseRootDir,tableName),hri.getEncodedName())); final Configuration newConf=HBaseConfiguration.create(this.conf); final WAL wal=wals.getWAL(UNSPECIFIED_REGION,null); final SampleRegionWALObserver newApi=getCoprocessor(wal,SampleRegionWALObserver.class); newApi.setTestValues(TEST_TABLE,TEST_ROW,null,null,null,null,null,null); final SampleRegionWALObserver oldApi=getCoprocessor(wal,SampleRegionWALObserver.Legacy.class); oldApi.setTestValues(TEST_TABLE,TEST_ROW,null,null,null,null,null,null); LOG.debug("ensuring wal entries haven't happened before we start"); assertFalse(newApi.isPreWALWriteCalled()); assertFalse(newApi.isPostWALWriteCalled()); assertFalse(newApi.isPreWALWriteDeprecatedCalled()); assertFalse(newApi.isPostWALWriteDeprecatedCalled()); assertFalse(oldApi.isPreWALWriteCalled()); assertFalse(oldApi.isPostWALWriteCalled()); assertFalse(oldApi.isPreWALWriteDeprecatedCalled()); assertFalse(oldApi.isPostWALWriteDeprecatedCalled()); LOG.debug("writing to WAL with non-legacy keys."); final int countPerFamily=5; for ( HColumnDescriptor hcd : htd.getFamilies()) { addWALEdits(tableName,hri,TEST_ROW,hcd.getName(),countPerFamily,EnvironmentEdgeManager.getDelegate(),wal,htd,mvcc); } LOG.debug("Verify that only the non-legacy CP saw edits."); assertTrue(newApi.isPreWALWriteCalled()); assertTrue(newApi.isPostWALWriteCalled()); assertFalse(newApi.isPreWALWriteDeprecatedCalled()); assertFalse(newApi.isPostWALWriteDeprecatedCalled()); assertFalse(oldApi.isPreWALWriteCalled()); assertFalse(oldApi.isPostWALWriteCalled()); assertFalse(oldApi.isPreWALWriteDeprecatedCalled()); 
assertFalse(oldApi.isPostWALWriteDeprecatedCalled()); LOG.debug("resetting cp state."); newApi.setTestValues(TEST_TABLE,TEST_ROW,null,null,null,null,null,null); oldApi.setTestValues(TEST_TABLE,TEST_ROW,null,null,null,null,null,null); LOG.debug("write a log edit that supports legacy cps."); final long now=EnvironmentEdgeManager.currentTime(); final WALKey legacyKey=new HLogKey(hri.getEncodedNameAsBytes(),hri.getTable(),now); final WALEdit edit=new WALEdit(); final byte[] nonce=Bytes.toBytes("1772"); edit.add(new KeyValue(TEST_ROW,TEST_FAMILY[0],nonce,now,nonce)); final long txid=wal.append(htd,hri,legacyKey,edit,true); wal.sync(txid); LOG.debug("Make sure legacy cps can see supported edits after having been skipped."); assertTrue("non-legacy WALObserver didn't see pre-write.",newApi.isPreWALWriteCalled()); assertTrue("non-legacy WALObserver didn't see post-write.",newApi.isPostWALWriteCalled()); assertFalse("non-legacy WALObserver shouldn't have seen legacy pre-write.",newApi.isPreWALWriteDeprecatedCalled()); assertFalse("non-legacy WALObserver shouldn't have seen legacy post-write.",newApi.isPostWALWriteDeprecatedCalled()); assertTrue("legacy WALObserver didn't see pre-write.",oldApi.isPreWALWriteCalled()); assertTrue("legacy WALObserver didn't see post-write.",oldApi.isPostWALWriteCalled()); assertTrue("legacy WALObserver didn't see legacy pre-write.",oldApi.isPreWALWriteDeprecatedCalled()); assertTrue("legacy WALObserver didn't see legacy post-write.",oldApi.isPostWALWriteDeprecatedCalled()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Writes edits to a WAL, then replays them through WAL splitting under a different user and
// reopens the region on a fresh WAL; asserts the region's coprocessor saw the (non-deprecated)
// pre/postWALRestore hooks. NOTE(review): there is a stray empty statement (";") after the wal2
// declaration, and `edit` appended after the per-family loop is still empty -- presumably
// intentional as written; confirm against the upstream test before cleaning up.
/** * Test WAL replay behavior with WALObserver. */ @Test public void testWALCoprocessorReplay() throws Exception { TableName tableName=TableName.valueOf("testWALCoprocessorReplay"); final HTableDescriptor htd=getBasic3FamilyHTableDescriptor(tableName); MultiVersionConcurrencyControl mvcc=new MultiVersionConcurrencyControl(); final HRegionInfo hri=new HRegionInfo(tableName,null,null); final Path basedir=FSUtils.getTableDir(this.hbaseRootDir,tableName); deleteDir(basedir); fs.mkdirs(new Path(basedir,hri.getEncodedName())); final Configuration newConf=HBaseConfiguration.create(this.conf); WAL wal=wals.getWAL(UNSPECIFIED_REGION,null); WALEdit edit=new WALEdit(); long now=EnvironmentEdgeManager.currentTime(); final int countPerFamily=1000; for ( HColumnDescriptor hcd : htd.getFamilies()) { addWALEdits(tableName,hri,TEST_ROW,hcd.getName(),countPerFamily,EnvironmentEdgeManager.getDelegate(),wal,htd,mvcc); } wal.append(htd,hri,new WALKey(hri.getEncodedNameAsBytes(),tableName,now,mvcc),edit,true); wal.sync(); User user=HBaseTestingUtility.getDifferentUser(newConf,".replay.wal.secondtime"); user.runAs(new PrivilegedExceptionAction(){ public Object run() throws Exception { Path p=runWALSplit(newConf); LOG.info("WALSplit path == " + p); FileSystem newFS=FileSystem.get(newConf); final WALFactory wals2=new WALFactory(conf,null,currentTest.getMethodName() + "2"); WAL wal2=wals2.getWAL(UNSPECIFIED_REGION,null); ; HRegion region=HRegion.openHRegion(newConf,FileSystem.get(newConf),hbaseRootDir,hri,htd,wal2,TEST_UTIL.getHBaseCluster().getRegionServer(0),null); long seqid2=region.getOpenSeqNum(); SampleRegionWALObserver cp2=(SampleRegionWALObserver)region.getCoprocessorHost().findCoprocessor(SampleRegionWALObserver.class.getName()); assertNotNull(cp2); assertTrue(cp2.isPreWALRestoreCalled()); assertTrue(cp2.isPostWALRestoreCalled()); assertFalse(cp2.isPreWALRestoreDeprecatedCalled()); assertFalse(cp2.isPostWALRestoreDeprecatedCalled()); region.close(); wals2.close(); return null; } } 
); }

Class: org.apache.hadoop.hbase.errorhandling.TestForeignExceptionDispatcher

InternalCallVerifier BooleanVerifier 
// Triggers a TimeoutExceptionInjector early and verifies the dispatcher records the timeout
// exception and forwards it to both mocked listeners exactly once.
@Test public void testSingleDispatcherWithTimer(){ ForeignExceptionListener listener1=Mockito.mock(ForeignExceptionListener.class); ForeignExceptionListener listener2=Mockito.mock(ForeignExceptionListener.class); ForeignExceptionDispatcher monitor=new ForeignExceptionDispatcher(); monitor.addListener(listener1); monitor.addListener(listener2); TimeoutExceptionInjector timer=new TimeoutExceptionInjector(monitor,1000); timer.start(); timer.trigger(); assertTrue("Monitor didn't get timeout",monitor.hasException()); Mockito.verify(listener1).receive(Mockito.any(ForeignException.class)); Mockito.verify(listener2).receive(Mockito.any(ForeignException.class)); }

Class: org.apache.hadoop.hbase.errorhandling.TestForeignExceptionSerialization

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Round-trips a plain Exception through ForeignException.serialize/deserialize and checks the
// stack trace, message, and absence of a nested cause all survive intact.
/** * Compare that a generic exception's stack trace has the same stack trace elements after * serialization and deserialization * @throws InvalidProtocolBufferException */ @Test public void testRemoteFromLocal() throws InvalidProtocolBufferException { String errorMsg="some message"; Exception generic=new Exception(errorMsg); generic.printStackTrace(); assertTrue(generic.getMessage().contains(errorMsg)); ForeignException e=ForeignException.deserialize(ForeignException.serialize(srcName,generic)); assertArrayEquals("Local stack trace got corrupted",generic.getStackTrace(),e.getCause().getStackTrace()); e.printStackTrace(); assertTrue(e.getCause().getCause() == null); assertTrue(e.getCause().getMessage().contains(errorMsg)); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Serializes a ForeignException wrapping an IllegalArgumentException, then again after replacing
// its stack trace with a single synthetic element; the deserialized cause must carry exactly that
// one element and nothing more.
/** * Verify that we get back similar stack trace information before an after serialization. * @throws InvalidProtocolBufferException */ @Test public void testSimpleException() throws InvalidProtocolBufferException { String data="some bytes"; ForeignException in=new ForeignException("SRC",new IllegalArgumentException(data)); ForeignException e=ForeignException.deserialize(ForeignException.serialize(srcName,in)); assertNotNull(e); StackTraceElement elem=new StackTraceElement(this.getClass().toString(),"method","file",1); in.setStackTrace(new StackTraceElement[]{elem}); e=ForeignException.deserialize(ForeignException.serialize(srcName,in)); assertNotNull(e); assertEquals("Stack trace got corrupted",elem,e.getCause().getStackTrace()[0]); assertEquals("Got an unexpectedly long stack trace",1,e.getCause().getStackTrace().length); }

Class: org.apache.hadoop.hbase.executor.TestExecutorService

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Exercises the ExecutorService lifecycle: submits maxThreads blocking handlers and checks the
// pool grows to maxThreads; releases the handlers via the shared lock and checks completion; then
// verifies the pool shrinks back after keep-alive expiry and that all executor statuses are gone
// after shutdown. NOTE(review): `tries` is NOT reset before the second wait loop, so a slow first
// phase shortens the second phase's timeout budget -- possible flakiness source; confirm intent.
// The final submit after shutdown() presumably asserts it does not throw -- verify upstream.
@Test public void testExecutorService() throws Exception { int maxThreads=5; int maxTries=10; int sleepInterval=10; Server mockedServer=mock(Server.class); when(mockedServer.getConfiguration()).thenReturn(HBaseConfiguration.create()); ExecutorService executorService=new ExecutorService("unit_test"); executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,maxThreads); Executor executor=executorService.getExecutor(ExecutorType.MASTER_SERVER_OPERATIONS); ThreadPoolExecutor pool=executor.threadPoolExecutor; assertEquals(0,pool.getPoolSize()); AtomicBoolean lock=new AtomicBoolean(true); AtomicInteger counter=new AtomicInteger(0); for (int i=0; i < maxThreads; i++) { executorService.submit(new TestEventHandler(mockedServer,EventType.M_SERVER_SHUTDOWN,lock,counter)); } int tries=0; while (counter.get() < maxThreads && tries < maxTries) { LOG.info("Waiting for all event handlers to start..."); Thread.sleep(sleepInterval); tries++; } assertEquals(maxThreads,counter.get()); assertEquals(maxThreads,pool.getPoolSize()); ExecutorStatus status=executor.getStatus(); assertTrue(status.queuedEvents.isEmpty()); assertEquals(5,status.running.size()); checkStatusDump(status); synchronized (lock) { lock.set(false); lock.notifyAll(); } while (counter.get() < (maxThreads * 2) && tries < maxTries) { System.out.println("Waiting for all event handlers to finish..."); Thread.sleep(sleepInterval); tries++; } assertEquals(maxThreads * 2,counter.get()); assertEquals(maxThreads,pool.getPoolSize()); for (int i=0; i < (2 * maxThreads); i++) { executorService.submit(new TestEventHandler(mockedServer,EventType.M_SERVER_SHUTDOWN,lock,counter)); } synchronized (lock) { lock.set(false); lock.notifyAll(); } Thread.sleep(ExecutorService.Executor.keepAliveTimeInMillis * 2); assertEquals(maxThreads,pool.getPoolSize()); executorService.shutdown(); assertEquals(0,executorService.getAllExecutorStatuses().size()); executorService.submit(new 
TestEventHandler(mockedServer,EventType.M_SERVER_SHUTDOWN,lock,counter)); }

Class: org.apache.hadoop.hbase.filter.TestColumnRangeFilter

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void TestColumnRangeFilterClient() throws Exception { String family="Family"; String table="TestColumnRangeFilterClient"; Table ht=TEST_UTIL.createTable(TableName.valueOf(table),Bytes.toBytes(family),Integer.MAX_VALUE); List rows=generateRandomWords(10,8); long maxTimestamp=2; List columns=generateRandomWords(20000,8); List kvList=new ArrayList(); Map> rangeMap=new HashMap>(); rangeMap.put(new StringRange(null,true,"b",false),new ArrayList()); rangeMap.put(new StringRange("p",true,"q",false),new ArrayList()); rangeMap.put(new StringRange("r",false,"s",true),new ArrayList()); rangeMap.put(new StringRange("z",false,null,false),new ArrayList()); String valueString="ValueString"; for ( String row : rows) { Put p=new Put(Bytes.toBytes(row)); p.setDurability(Durability.SKIP_WAL); for ( String column : columns) { for (long timestamp=1; timestamp <= maxTimestamp; timestamp++) { KeyValue kv=KeyValueTestUtil.create(row,family,column,timestamp,valueString); p.add(kv); kvList.add(kv); for ( StringRange s : rangeMap.keySet()) { if (s.inRange(column)) { rangeMap.get(s).add(kv); } } } } ht.put(p); } TEST_UTIL.flush(); ColumnRangeFilter filter; Scan scan=new Scan(); scan.setMaxVersions(); for ( StringRange s : rangeMap.keySet()) { filter=new ColumnRangeFilter(s.getStart() == null ? null : Bytes.toBytes(s.getStart()),s.isStartInclusive(),s.getEnd() == null ? 
null : Bytes.toBytes(s.getEnd()),s.isEndInclusive()); scan.setFilter(filter); ResultScanner scanner=ht.getScanner(scan); List results=new ArrayList(); LOG.info("scan column range: " + s.toString()); long timeBeforeScan=System.currentTimeMillis(); Result result; while ((result=scanner.next()) != null) { for ( Cell kv : result.listCells()) { results.add(kv); } } long scanTime=System.currentTimeMillis() - timeBeforeScan; scanner.close(); LOG.info("scan time = " + scanTime + "ms"); LOG.info("found " + results.size() + " results"); LOG.info("Expecting " + rangeMap.get(s).size() + " results"); assertEquals(rangeMap.get(s).size(),results.size()); } ht.close(); }

Class: org.apache.hadoop.hbase.filter.TestComparators

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Compares rows, qualifiers, values, and families of both heap KeyValues and the equivalent
// ByteBuffer-backed cells against Binary/BinaryPrefix/Long/Substring comparators, asserting both
// cell flavors yield identical comparison signs for every field.
@Test public void testCellFieldsCompare() throws Exception { byte[] r0=Bytes.toBytes("row0"); byte[] r1=Bytes.toBytes("row1"); byte[] r2=Bytes.toBytes("row2"); byte[] f=Bytes.toBytes("cf1"); byte[] q1=Bytes.toBytes("qual1"); byte[] q2=Bytes.toBytes("qual2"); byte[] q3=Bytes.toBytes("r"); long l1=1234L; byte[] v1=Bytes.toBytes(l1); long l2=2000L; byte[] v2=Bytes.toBytes(l2); KeyValue kv=new KeyValue(r1,f,q1,v1); ByteBuffer buffer=ByteBuffer.wrap(kv.getBuffer()); Cell bbCell=new ByteBufferedCellImpl(buffer,0,buffer.remaining()); ByteArrayComparable comparable=new BinaryComparator(r1); assertEquals(0,CellComparator.compareRow(bbCell,comparable)); assertEquals(0,CellComparator.compareRow(kv,comparable)); kv=new KeyValue(r0,f,q1,v1); buffer=ByteBuffer.wrap(kv.getBuffer()); bbCell=new ByteBufferedCellImpl(buffer,0,buffer.remaining()); assertTrue(CellComparator.compareRow(bbCell,comparable) > 0); assertTrue(CellComparator.compareRow(kv,comparable) > 0); kv=new KeyValue(r2,f,q1,v1); buffer=ByteBuffer.wrap(kv.getBuffer()); bbCell=new ByteBufferedCellImpl(buffer,0,buffer.remaining()); assertTrue(CellComparator.compareRow(bbCell,comparable) < 0); assertTrue(CellComparator.compareRow(kv,comparable) < 0); comparable=new BinaryPrefixComparator(Bytes.toBytes("qual")); assertEquals(0,CellComparator.compareQualifier(bbCell,comparable)); assertEquals(0,CellComparator.compareQualifier(kv,comparable)); kv=new KeyValue(r2,f,q2,v1); buffer=ByteBuffer.wrap(kv.getBuffer()); bbCell=new ByteBufferedCellImpl(buffer,0,buffer.remaining()); assertEquals(0,CellComparator.compareQualifier(bbCell,comparable)); assertEquals(0,CellComparator.compareQualifier(kv,comparable)); kv=new KeyValue(r2,f,q3,v1); buffer=ByteBuffer.wrap(kv.getBuffer()); bbCell=new ByteBufferedCellImpl(buffer,0,buffer.remaining()); assertTrue(CellComparator.compareQualifier(bbCell,comparable) < 0); assertTrue(CellComparator.compareQualifier(kv,comparable) < 0); comparable=new LongComparator(l1); 
assertEquals(0,CellComparator.compareValue(bbCell,comparable)); assertEquals(0,CellComparator.compareValue(kv,comparable)); kv=new KeyValue(r1,f,q1,v2); buffer=ByteBuffer.wrap(kv.getBuffer()); bbCell=new ByteBufferedCellImpl(buffer,0,buffer.remaining()); assertTrue(CellComparator.compareValue(bbCell,comparable) < 0); assertTrue(CellComparator.compareValue(kv,comparable) < 0); comparable=new SubstringComparator("cf"); assertEquals(0,CellComparator.compareFamily(bbCell,comparable)); assertEquals(0,CellComparator.compareFamily(kv,comparable)); }

Class: org.apache.hadoop.hbase.filter.TestDependentColumnFilter

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/** * Test for HBASE-8794. Avoid NullPointerException in DependentColumnFilter.toString(). */
// Builds the filter both via the two-arg constructor (implicit null comparator) and with an
// explicit null comparator; toString() must not throw and must render the comparator as "null".
// Fix: corrected assertion-message typo "compatator" -> "comparator" (both occurrences).
@Test public void testToStringWithNullComparator(){
  Filter filter = new DependentColumnFilter(FAMILIES[0], QUALIFIER);
  assertNotNull(filter.toString());
  assertTrue("check string contains 'null' as comparator is null", filter.toString().contains("null"));
  filter = new DependentColumnFilter(FAMILIES[0], QUALIFIER, true, CompareOp.EQUAL, null);
  assertNotNull(filter.toString());
  assertTrue("check string contains 'null' as comparator is null", filter.toString().contains("null"));
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Counterpart of testToStringWithNullComparator: with a BinaryComparator over MATCH_VAL,
// toString() must render the comparator's value (here, a string containing "match").
@Test public void testToStringWithNonNullComparator(){ Filter filter=new DependentColumnFilter(FAMILIES[0],QUALIFIER,true,CompareOp.EQUAL,new BinaryComparator(MATCH_VAL)); assertNotNull(filter.toString()); assertTrue("check string contains comparator value",filter.toString().contains("match")); }

Class: org.apache.hadoop.hbase.filter.TestFilterList

InternalCallVerifier EqualityVerifier 
// qual1 passes the KeyOnlyFilter branch so its transform strips the value (length 0); qual2
// passes the other branch so its value survives; qual3 matches neither branch and is SKIPped.
/** * Tests the behavior of transform() in a hierarchical filter. * transform() only applies after a filterKeyValue() whose return-code includes the KeyValue. * Lazy evaluation of AND */ @Test public void testTransformMPO() throws Exception { final FilterList flist=new FilterList(Operator.MUST_PASS_ONE,Lists.newArrayList(new FilterList(Operator.MUST_PASS_ALL,Lists.newArrayList(new FamilyFilter(CompareOp.EQUAL,new BinaryComparator(Bytes.toBytes("fam"))),new QualifierFilter(CompareOp.EQUAL,new BinaryComparator(Bytes.toBytes("qual1"))),new KeyOnlyFilter())),new FilterList(Operator.MUST_PASS_ALL,Lists.newArrayList(new FamilyFilter(CompareOp.EQUAL,new BinaryComparator(Bytes.toBytes("fam"))),new QualifierFilter(CompareOp.EQUAL,new BinaryComparator(Bytes.toBytes("qual2"))))))); final KeyValue kvQual1=new KeyValue(Bytes.toBytes("row"),Bytes.toBytes("fam"),Bytes.toBytes("qual1"),Bytes.toBytes("value")); final KeyValue kvQual2=new KeyValue(Bytes.toBytes("row"),Bytes.toBytes("fam"),Bytes.toBytes("qual2"),Bytes.toBytes("value")); final KeyValue kvQual3=new KeyValue(Bytes.toBytes("row"),Bytes.toBytes("fam"),Bytes.toBytes("qual3"),Bytes.toBytes("value")); assertEquals(Filter.ReturnCode.INCLUDE,flist.filterKeyValue(kvQual1)); final KeyValue transformedQual1=KeyValueUtil.ensureKeyValue(flist.transformCell(kvQual1)); assertEquals(0,transformedQual1.getValueLength()); assertEquals(Filter.ReturnCode.INCLUDE,flist.filterKeyValue(kvQual2)); final KeyValue transformedQual2=KeyValueUtil.ensureKeyValue(flist.transformCell(kvQual2)); assertEquals("value",Bytes.toString(transformedQual2.getValueArray(),transformedQual2.getValueOffset(),transformedQual2.getValueLength())); assertEquals(Filter.ReturnCode.SKIP,flist.filterKeyValue(kvQual3)); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Three anonymous filters (no hint / min-key hint / max-key hint) are combined under both
// operators. The assertions pin the aggregation rules: with MUST_PASS_ONE a hint-less child
// suppresses the hint entirely (null); with only hinting children the smallest/largest hint as
// asserted below is returned. With MUST_PASS_ALL, filterKeyValue() is invoked first and the
// subsequent getNextCellHint() result is compared against min/max per the combination tried.
/** * Test pass-thru of hints. */ @Test public void testHintPassThru() throws Exception { final KeyValue minKeyValue=new KeyValue(Bytes.toBytes(0L),null,null); final KeyValue maxKeyValue=new KeyValue(Bytes.toBytes(Long.MAX_VALUE),null,null); Filter filterNoHint=new FilterBase(){ @Override public byte[] toByteArray(){ return null; } @Override public ReturnCode filterKeyValue( Cell ignored) throws IOException { return ReturnCode.INCLUDE; } } ; Filter filterMinHint=new FilterBase(){ @Override public ReturnCode filterKeyValue( Cell ignored){ return ReturnCode.SEEK_NEXT_USING_HINT; } @Override public Cell getNextCellHint( Cell currentKV){ return minKeyValue; } @Override public byte[] toByteArray(){ return null; } } ; Filter filterMaxHint=new FilterBase(){ @Override public ReturnCode filterKeyValue( Cell ignored){ return ReturnCode.SEEK_NEXT_USING_HINT; } @Override public Cell getNextCellHint( Cell cell){ return new KeyValue(Bytes.toBytes(Long.MAX_VALUE),null,null); } @Override public byte[] toByteArray(){ return null; } } ; FilterList filterList=new FilterList(Operator.MUST_PASS_ONE,Arrays.asList(new Filter[]{filterMinHint,filterMaxHint})); assertEquals(0,CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),minKeyValue)); filterList=new FilterList(Operator.MUST_PASS_ONE,Arrays.asList(new Filter[]{filterMinHint,filterMaxHint,filterNoHint})); assertNull(filterList.getNextCellHint(null)); filterList=new FilterList(Operator.MUST_PASS_ONE,Arrays.asList(new Filter[]{filterNoHint,filterMaxHint})); assertNull(filterList.getNextCellHint(null)); filterList=new FilterList(Operator.MUST_PASS_ONE,Arrays.asList(new Filter[]{filterMaxHint,filterMaxHint})); assertEquals(0,CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),maxKeyValue)); filterList=new FilterList(Operator.MUST_PASS_ALL,Arrays.asList(new Filter[]{filterMinHint,filterMaxHint})); filterList.filterKeyValue(null); 
assertEquals(0,CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),minKeyValue)); filterList=new FilterList(Operator.MUST_PASS_ALL,Arrays.asList(new Filter[]{filterMaxHint,filterMinHint})); filterList.filterKeyValue(null); assertEquals(0,CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),maxKeyValue)); filterList=new FilterList(Operator.MUST_PASS_ALL,Arrays.asList(new Filter[]{filterNoHint,filterMinHint,filterMaxHint})); filterList.filterKeyValue(null); assertEquals(0,CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),minKeyValue)); filterList=new FilterList(Operator.MUST_PASS_ALL,Arrays.asList(new Filter[]{filterNoHint,filterMaxHint})); filterList.filterKeyValue(null); assertEquals(0,CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),maxKeyValue)); filterList=new FilterList(Operator.MUST_PASS_ALL,Arrays.asList(new Filter[]{filterNoHint,filterMinHint})); filterList.filterKeyValue(null); assertEquals(0,CellComparator.COMPARATOR.compare(filterList.getNextCellHint(null),minKeyValue)); }

Class: org.apache.hadoop.hbase.filter.TestFilterSerialization

InternalCallVerifier BooleanVerifier 
// Round-trips a FilterList through protobuf serialization (toFilter twice) and verifies the
// serialized fields survive. Bug fix: the original populated `list` with two child filters but
// never used it, so the second assertion re-serialized the still-empty FilterList and the
// non-empty case was never exercised. We now rebuild filterList from `list` before the second
// round-trip.
@Test public void testFilterList() throws Exception {
  // Empty filter list round-trip.
  FilterList filterList = new FilterList(new LinkedList<Filter>());
  assertTrue(filterList.areSerializedFieldsEqual(
      ProtobufUtil.toFilter(ProtobufUtil.toFilter(filterList))));
  // Non-empty filter list round-trip.
  LinkedList<Filter> list = new LinkedList<Filter>();
  list.add(new ColumnCountGetFilter(1));
  list.add(new RowFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("testFilterList")));
  filterList = new FilterList(list);
  assertTrue(filterList.areSerializedFieldsEqual(
      ProtobufUtil.toFilter(ProtobufUtil.toFilter(filterList))));
}

Class: org.apache.hadoop.hbase.filter.TestLongComparator

IterativeVerifier InternalCallVerifier EqualityVerifier 
// For every ordered pair (i, j) with j < i, a LongComparator over values[i] must compare greater
// than values[j], both via the byte[] overload and the ByteBuffer overload.
// NOTE(review): assumes the `values` field is sorted ascending -- confirm in the enclosing class.
@Test public void testSimple(){ for (int i=1; i < values.length; i++) { for (int j=0; j < i; j++) { LongComparator cp=new LongComparator(values[i]); assertEquals(1,cp.compareTo(Bytes.toBytes(values[j]))); ByteBuffer data_bb=ByteBuffer.wrap(Bytes.toBytes(values[j])); assertEquals(1,cp.compareTo(data_bb,0,data_bb.capacity())); } } }

Class: org.apache.hadoop.hbase.filter.TestMultiRowRangeFilter

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// A range with an empty stop row is open-ended: a filter over [10, "") plus [30, 40) must match
// the same count as a raw scan from 10 to the end of the table.
@Test public void testMultiRowRangeFilterWithEmptyStopRow() throws IOException { tableName=TableName.valueOf("testMultiRowRangeFilterWithEmptyStopRow"); Table ht=TEST_UTIL.createTable(tableName,family,Integer.MAX_VALUE); generateRows(numRows,ht,family,qf,value); Scan scan=new Scan(); scan.setMaxVersions(); List ranges=new ArrayList(); ranges.add(new RowRange(Bytes.toBytes(10),true,Bytes.toBytes(""),false)); ranges.add(new RowRange(Bytes.toBytes(30),true,Bytes.toBytes(40),false)); MultiRowRangeFilter filter=new MultiRowRangeFilter(ranges); scan.setFilter(filter); int resultsSize=getResultsSize(ht,scan); List results1=getScanResult(Bytes.toBytes(10),Bytes.toBytes(""),ht); assertEquals(results1.size(),resultsSize); ht.close(); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Overlapping input ranges must be merged: [10,20)+[15,40) collapse to [10,40), and the three
// ranges starting at 60/65 (one open-ended via a null stop row) collapse to [60, end); the filter
// count must equal the two merged reference scans combined.
@Test public void testMultiRowRangeFilterWithRangeOverlap() throws IOException { tableName=TableName.valueOf("testMultiRowRangeFilterWithRangeOverlap"); Table ht=TEST_UTIL.createTable(tableName,family,Integer.MAX_VALUE); generateRows(numRows,ht,family,qf,value); Scan scan=new Scan(); scan.setMaxVersions(); List ranges=new ArrayList(); ranges.add(new RowRange(Bytes.toBytes(10),true,Bytes.toBytes(20),false)); ranges.add(new RowRange(Bytes.toBytes(15),true,Bytes.toBytes(40),false)); ranges.add(new RowRange(Bytes.toBytes(65),true,Bytes.toBytes(75),false)); ranges.add(new RowRange(Bytes.toBytes(60),true,null,false)); ranges.add(new RowRange(Bytes.toBytes(60),true,Bytes.toBytes(80),false)); MultiRowRangeFilter filter=new MultiRowRangeFilter(ranges); scan.setFilter(filter); int resultsSize=getResultsSize(ht,scan); LOG.info("found " + resultsSize + " results"); List results1=getScanResult(Bytes.toBytes(10),Bytes.toBytes(40),ht); List results2=getScanResult(Bytes.toBytes(60),Bytes.toBytes(""),ht); assertEquals(results1.size() + results2.size(),resultsSize); ht.close(); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Adjacent ranges sharing a boundary ([10,20) then [20,40)) must behave as the contiguous
// [10,40); combined with the open-ended 60+ ranges the count must match the two reference scans.
@Test public void testMultiRowRangeFilterWithInclusive() throws IOException { tableName=TableName.valueOf("testMultiRowRangeFilterWithInclusive"); Table ht=TEST_UTIL.createTable(tableName,family,Integer.MAX_VALUE); generateRows(numRows,ht,family,qf,value); Scan scan=new Scan(); scan.setMaxVersions(); List ranges=new ArrayList(); ranges.add(new RowRange(Bytes.toBytes(10),true,Bytes.toBytes(20),false)); ranges.add(new RowRange(Bytes.toBytes(20),true,Bytes.toBytes(40),false)); ranges.add(new RowRange(Bytes.toBytes(65),true,Bytes.toBytes(75),false)); ranges.add(new RowRange(Bytes.toBytes(60),true,null,false)); ranges.add(new RowRange(Bytes.toBytes(60),true,Bytes.toBytes(80),false)); MultiRowRangeFilter filter=new MultiRowRangeFilter(ranges); scan.setFilter(filter); int resultsSize=getResultsSize(ht,scan); LOG.info("found " + resultsSize + " results"); List results1=getScanResult(Bytes.toBytes(10),Bytes.toBytes(40),ht); List results2=getScanResult(Bytes.toBytes(60),Bytes.toBytes(""),ht); assertEquals(results1.size() + results2.size(),resultsSize); ht.close(); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Two MultiRowRangeFilters ANDed in a MUST_PASS_ALL FilterList: only the intersection of their
// ranges survives, which here is [30,40) -- the count must equal that single reference scan.
@Test public void testMultiRowRangeWithFilterListAndOperator() throws IOException { tableName=TableName.valueOf("TestMultiRowRangeFilterWithFilterListAndOperator"); Table ht=TEST_UTIL.createTable(tableName,family,Integer.MAX_VALUE); generateRows(numRows,ht,family,qf,value); Scan scan=new Scan(); scan.setMaxVersions(); List ranges1=new ArrayList(); ranges1.add(new RowRange(Bytes.toBytes(10),true,Bytes.toBytes(20),false)); ranges1.add(new RowRange(Bytes.toBytes(30),true,Bytes.toBytes(40),false)); ranges1.add(new RowRange(Bytes.toBytes(60),true,Bytes.toBytes(70),false)); MultiRowRangeFilter filter1=new MultiRowRangeFilter(ranges1); List ranges2=new ArrayList(); ranges2.add(new RowRange(Bytes.toBytes(20),true,Bytes.toBytes(40),false)); ranges2.add(new RowRange(Bytes.toBytes(80),true,Bytes.toBytes(90),false)); MultiRowRangeFilter filter2=new MultiRowRangeFilter(ranges2); FilterList filterList=new FilterList(FilterList.Operator.MUST_PASS_ALL); filterList.addFilter(filter1); filterList.addFilter(filter2); scan.setFilter(filterList); int resultsSize=getResultsSize(ht,scan); LOG.info("found " + resultsSize + " results"); List results1=getScanResult(Bytes.toBytes(30),Bytes.toBytes(40),ht); assertEquals(results1.size(),resultsSize); ht.close(); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Two MultiRowRangeFilters ORed in a MUST_PASS_ONE FilterList: the union of their ranges --
// [10,40), [60,70), [80,90) -- must match the sum of the three reference scans.
@Test public void testMultiRowRangeWithFilterListOrOperator() throws IOException { tableName=TableName.valueOf("TestMultiRowRangeFilterWithFilterListOrOperator"); Table ht=TEST_UTIL.createTable(tableName,family,Integer.MAX_VALUE); generateRows(numRows,ht,family,qf,value); Scan scan=new Scan(); scan.setMaxVersions(); List ranges1=new ArrayList(); ranges1.add(new RowRange(Bytes.toBytes(30),true,Bytes.toBytes(40),false)); ranges1.add(new RowRange(Bytes.toBytes(10),true,Bytes.toBytes(20),false)); ranges1.add(new RowRange(Bytes.toBytes(60),true,Bytes.toBytes(70),false)); MultiRowRangeFilter filter1=new MultiRowRangeFilter(ranges1); List ranges2=new ArrayList(); ranges2.add(new RowRange(Bytes.toBytes(20),true,Bytes.toBytes(40),false)); ranges2.add(new RowRange(Bytes.toBytes(80),true,Bytes.toBytes(90),false)); MultiRowRangeFilter filter2=new MultiRowRangeFilter(ranges2); FilterList filterList=new FilterList(FilterList.Operator.MUST_PASS_ONE); filterList.addFilter(filter1); filterList.addFilter(filter2); scan.setFilter(filterList); int resultsSize=getResultsSize(ht,scan); LOG.info("found " + resultsSize + " results"); List results1=getScanResult(Bytes.toBytes(10),Bytes.toBytes(40),ht); List results2=getScanResult(Bytes.toBytes(60),Bytes.toBytes(70),ht); List results3=getScanResult(Bytes.toBytes(80),Bytes.toBytes(90),ht); assertEquals(results1.size() + results2.size() + results3.size(),resultsSize); ht.close(); }

InternalCallVerifier EqualityVerifier 
// Regression check with signed-byte row keys: a key (-10) below both ranges must yield
// SEEK_NEXT_USING_HINT rather than being wrongly included. NOTE(review): unsigned byte
// comparison orders {5} before {-3} -- the ranges are deliberately given "out of order".
@Test public void testRanges() throws IOException { byte[] key1Start=new byte[]{-3}; byte[] key1End=new byte[]{-2}; byte[] key2Start=new byte[]{5}; byte[] key2End=new byte[]{6}; byte[] badKey=new byte[]{-10}; MultiRowRangeFilter filter=new MultiRowRangeFilter(Arrays.asList(new MultiRowRangeFilter.RowRange(key1Start,true,key1End,false),new MultiRowRangeFilter.RowRange(key2Start,true,key2End,false))); filter.filterRowKey(KeyValueUtil.createFirstOnRow(badKey)); assertEquals(Filter.ReturnCode.SEEK_NEXT_USING_HINT,filter.filterKeyValue(null)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Disjoint ranges supplied out of order ([30,40), [10,20), [60,70)): the filter count must equal
// the sum of the three individual reference scans.
@Test public void testMultiRowRangeFilterWithoutRangeOverlap() throws IOException { tableName=TableName.valueOf("testMultiRowRangeFilterWithoutRangeOverlap"); Table ht=TEST_UTIL.createTable(tableName,family,Integer.MAX_VALUE); generateRows(numRows,ht,family,qf,value); Scan scan=new Scan(); scan.setMaxVersions(); List ranges=new ArrayList(); ranges.add(new RowRange(Bytes.toBytes(30),true,Bytes.toBytes(40),false)); ranges.add(new RowRange(Bytes.toBytes(10),true,Bytes.toBytes(20),false)); ranges.add(new RowRange(Bytes.toBytes(60),true,Bytes.toBytes(70),false)); MultiRowRangeFilter filter=new MultiRowRangeFilter(ranges); scan.setFilter(filter); int resultsSize=getResultsSize(ht,scan); LOG.info("found " + resultsSize + " results"); List results1=getScanResult(Bytes.toBytes(10),Bytes.toBytes(20),ht); List results2=getScanResult(Bytes.toBytes(30),Bytes.toBytes(40),ht); List results3=getScanResult(Bytes.toBytes(60),Bytes.toBytes(70),ht); assertEquals(results1.size() + results2.size() + results3.size(),resultsSize); ht.close(); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Exclusive start boundary: (20,40) excludes row 20, so the expected count is the [10,40)
// reference scan minus one row, plus the [65,75) reference scan.
@Test public void testMultiRowRangeFilterWithExclusive() throws IOException { tableName=TableName.valueOf("testMultiRowRangeFilterWithExclusive"); Table ht=TEST_UTIL.createTable(tableName,family,Integer.MAX_VALUE); generateRows(numRows,ht,family,qf,value); Scan scan=new Scan(); scan.setMaxVersions(); List ranges=new ArrayList(); ranges.add(new RowRange(Bytes.toBytes(10),true,Bytes.toBytes(20),false)); ranges.add(new RowRange(Bytes.toBytes(20),false,Bytes.toBytes(40),false)); ranges.add(new RowRange(Bytes.toBytes(65),true,Bytes.toBytes(75),false)); MultiRowRangeFilter filter=new MultiRowRangeFilter(ranges); scan.setFilter(filter); int resultsSize=getResultsSize(ht,scan); LOG.info("found " + resultsSize + " results"); List results1=getScanResult(Bytes.toBytes(10),Bytes.toBytes(40),ht); List results2=getScanResult(Bytes.toBytes(65),Bytes.toBytes(75),ht); assertEquals((results1.size() - 1) + results2.size(),resultsSize); ht.close(); }

InternalCallVerifier EqualityVerifier 
// Feeds row keys in order (a..e) through filterRowKey/filterKeyValue: "a" (before both ranges)
// must produce SEEK_NEXT_USING_HINT; every key inside the inclusive ranges [b,c] and [d,e] must
// produce INCLUDE -- guarding against out-of-order scanner state in the filter.
@Test public void testOutOfOrderScannerNextException() throws Exception { MultiRowRangeFilter filter=new MultiRowRangeFilter(Arrays.asList(new MultiRowRangeFilter.RowRange(Bytes.toBytes("b"),true,Bytes.toBytes("c"),true),new MultiRowRangeFilter.RowRange(Bytes.toBytes("d"),true,Bytes.toBytes("e"),true))); filter.filterRowKey(KeyValueUtil.createFirstOnRow(Bytes.toBytes("a"))); assertEquals(Filter.ReturnCode.SEEK_NEXT_USING_HINT,filter.filterKeyValue(null)); filter.filterRowKey(KeyValueUtil.createFirstOnRow(Bytes.toBytes("b"))); assertEquals(Filter.ReturnCode.INCLUDE,filter.filterKeyValue(null)); filter.filterRowKey(KeyValueUtil.createFirstOnRow(Bytes.toBytes("c"))); assertEquals(Filter.ReturnCode.INCLUDE,filter.filterKeyValue(null)); filter.filterRowKey(KeyValueUtil.createFirstOnRow(Bytes.toBytes("d"))); assertEquals(Filter.ReturnCode.INCLUDE,filter.filterKeyValue(null)); filter.filterRowKey(KeyValueUtil.createFirstOnRow(Bytes.toBytes("e"))); assertEquals(Filter.ReturnCode.INCLUDE,filter.filterKeyValue(null)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testMultiRowRangeFilterWithEmptyStartRow() throws IOException {
  // A range whose start row is the empty byte array scans from the start of
  // the table; the filtered count must equal the two plain scans combined.
  tableName = TableName.valueOf("testMultiRowRangeFilterWithEmptyStartRow");
  Table table = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, table, family, qf, value);

  Scan scan = new Scan();
  scan.setMaxVersions();
  List ranges = new ArrayList();
  ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(10), false));
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  scan.setFilter(new MultiRowRangeFilter(ranges));

  int resultsSize = getResultsSize(table, scan);
  List firstRange = getScanResult(Bytes.toBytes(""), Bytes.toBytes(10), table);
  List secondRange = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), table);
  assertEquals(firstRange.size() + secondRange.size(), resultsSize);
  table.close();
}

Class: org.apache.hadoop.hbase.filter.TestNullComparator

InternalCallVerifier EqualityVerifier 
@Test
public void testNullValue() {
  // A null value must compare as "equal" (0) under NullComparator, through
  // both compareTo overloads (offset/length are irrelevant for null).
  final NullComparator comparator = new NullComparator();
  final byte[] missing = null;
  Assert.assertEquals(0, comparator.compareTo(missing));
  Assert.assertEquals(0, comparator.compareTo(missing, 5, 15));
}

InternalCallVerifier EqualityVerifier 
@Test
public void testNonNullValue() {
  // Any non-null byte array compares as "greater" (1) under NullComparator.
  final NullComparator comparator = new NullComparator();
  final byte[] data = new byte[] {0, 1, 2, 3, 4, 5};
  Assert.assertEquals(1, comparator.compareTo(data));
  Assert.assertEquals(1, comparator.compareTo(data, 1, 3));
}

InternalCallVerifier EqualityVerifier 
@Test
public void testEmptyValue() {
  // NOTE(review): despite the test name, this value is a one-element array
  // (new byte[]{0}), not an empty array. NullComparator treats any non-null
  // array as "greater" (1), so the asserts pass either way — confirm whether
  // new byte[0] was intended.
  final NullComparator comparator = new NullComparator();
  final byte[] data = new byte[] {0};
  Assert.assertEquals(1, comparator.compareTo(data));
  Assert.assertEquals(1, comparator.compareTo(data, 1, 3));
}

Class: org.apache.hadoop.hbase.filter.TestParseFilter

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testCompoundFilter3() throws IOException {
  // AND binds tighter than OR: ((ColumnPrefix AND FirstKeyOnly) OR SKIP Family).
  String filterString = " ColumnPrefixFilter ('realtime')AND "
      + "FirstKeyOnlyFilter() OR SKIP FamilyFilter(=, 'substring:hihi')";
  FilterList outer = doTestFilter(filterString, FilterList.class);
  ArrayList children = (ArrayList) outer.getFilters();
  assertTrue(children.get(0) instanceof FilterList);
  assertTrue(children.get(1) instanceof SkipFilter);
  FilterList andList = (FilterList) children.get(0);
  SkipFilter skip = (SkipFilter) children.get(1);
  ArrayList andChildren = (ArrayList) andList.getFilters();
  assertTrue(andChildren.get(0) instanceof ColumnPrefixFilter);
  assertTrue(andChildren.get(1) instanceof FirstKeyOnlyFilter);
  ColumnPrefixFilter columnPrefix = (ColumnPrefixFilter) andChildren.get(0);
  assertEquals(new String(columnPrefix.getPrefix()), "realtime");
  assertTrue(skip.getFilter() instanceof FamilyFilter);
  FamilyFilter familyFilter = (FamilyFilter) skip.getFilter();
  assertEquals(CompareFilter.CompareOp.EQUAL, familyFilter.getOperator());
  assertTrue(familyFilter.getComparator() instanceof SubstringComparator);
  SubstringComparator substring = (SubstringComparator) familyFilter.getComparator();
  assertEquals("hihi", new String(substring.getValue()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testPrecedence2() throws IOException {
  // AND and SKIP bind tighter than OR: ((Prefix AND SKIP FirstKeyOnly) OR KeyOnly).
  String filterString = " PrefixFilter ('realtime')AND SKIP FirstKeyOnlyFilter()"
      + "OR KeyOnlyFilter()";
  FilterList outer = doTestFilter(filterString, FilterList.class);
  ArrayList children = (ArrayList) outer.getFilters();
  assertTrue(children.get(0) instanceof FilterList);
  assertTrue(children.get(1) instanceof KeyOnlyFilter);
  FilterList andList = (FilterList) children.get(0);
  ArrayList andChildren = (ArrayList) andList.getFilters();
  assertTrue(andChildren.get(0) instanceof PrefixFilter);
  assertTrue(andChildren.get(1) instanceof SkipFilter);
  PrefixFilter prefixFilter = (PrefixFilter) andChildren.get(0);
  assertEquals(new String(prefixFilter.getPrefix()), "realtime");
  SkipFilter skip = (SkipFilter) andChildren.get(1);
  assertTrue(skip.getFilter() instanceof FirstKeyOnlyFilter);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testTimestampsFilter() throws IOException {
  // A valid list parses; the assertion expects the smallest timestamp first.
  String filterString = "TimestampsFilter(9223372036854775806, 6)";
  TimestampsFilter timestampsFilter = doTestFilter(filterString, TimestampsFilter.class);
  List timestamps = timestampsFilter.getTimestamps();
  assertEquals(2, timestamps.size());
  assertEquals(Long.valueOf(6L), timestamps.get(0));
  // An empty argument list yields an empty timestamp list.
  filterString = "TimestampsFilter()";
  timestampsFilter = doTestFilter(filterString, TimestampsFilter.class);
  timestamps = timestampsFilter.getTimestamps();
  assertEquals(0, timestamps.size());
  // A value beyond Long.MAX_VALUE must be rejected.
  // Fix: the expected-failure cases previously passed ColumnPaginationFilter.class
  // (copy-paste from another test); use TimestampsFilter.class so that, should the
  // parser ever stop throwing, the cast checks the correct filter type.
  filterString = "TimestampsFilter(9223372036854775808, 6)";
  try {
    doTestFilter(filterString, TimestampsFilter.class);
    assertTrue(false);
  } catch (IllegalArgumentException e) {
    System.out.println("Long Argument was too large");
  }
  // Negative timestamps must be rejected.
  filterString = "TimestampsFilter(-45, 6)";
  try {
    doTestFilter(filterString, TimestampsFilter.class);
    assertTrue(false);
  } catch (IllegalArgumentException e) {
    System.out.println("Timestamp Arguments should not be negative");
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testQualifierFilter() throws IOException {
  // Parse a QualifierFilter with a regex comparator and verify each part.
  QualifierFilter parsed =
      doTestFilter("QualifierFilter(=, 'regexstring:pre*')", QualifierFilter.class);
  assertEquals(CompareFilter.CompareOp.EQUAL, parsed.getOperator());
  assertTrue(parsed.getComparator() instanceof RegexStringComparator);
  RegexStringComparator comparator = (RegexStringComparator) parsed.getComparator();
  assertEquals("pre*", new String(comparator.getValue()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testPrefixFilter() throws IOException {
  // A quoted argument parses successfully.
  PrefixFilter parsed = doTestFilter(" PrefixFilter('row' ) ", PrefixFilter.class);
  assertEquals(new String(parsed.getPrefix()), "row");
  // An unquoted argument must be rejected.
  try {
    doTestFilter(" PrefixFilter(row)", PrefixFilter.class);
    assertTrue(false);
  } catch (IllegalArgumentException e) {
    System.out.println(e.getMessage());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testCompoundFilter4() throws IOException {
  // Three OR-ed filters collapse into one flat FilterList.
  String filterString = " ColumnPrefixFilter ('realtime') OR "
      + "FirstKeyOnlyFilter() OR SKIP FamilyFilter(=, 'substring:hihi')";
  FilterList orList = doTestFilter(filterString, FilterList.class);
  ArrayList children = (ArrayList) orList.getFilters();
  assertTrue(children.get(0) instanceof ColumnPrefixFilter);
  assertTrue(children.get(1) instanceof FirstKeyOnlyFilter);
  assertTrue(children.get(2) instanceof SkipFilter);
  ColumnPrefixFilter columnPrefix = (ColumnPrefixFilter) children.get(0);
  SkipFilter skip = (SkipFilter) children.get(2);
  assertEquals(new String(columnPrefix.getPrefix()), "realtime");
  assertTrue(skip.getFilter() instanceof FamilyFilter);
  FamilyFilter familyFilter = (FamilyFilter) skip.getFilter();
  assertEquals(CompareFilter.CompareOp.EQUAL, familyFilter.getOperator());
  assertTrue(familyFilter.getComparator() instanceof SubstringComparator);
  SubstringComparator substring = (SubstringComparator) familyFilter.getComparator();
  assertEquals("hihi", new String(substring.getValue()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testPrecedence1() throws IOException {
  // Parenthesized expression: ((Prefix AND FirstKeyOnly) OR KeyOnly).
  String filterString = " (PrefixFilter ('realtime')AND FirstKeyOnlyFilter()"
      + " OR KeyOnlyFilter())";
  FilterList outer = doTestFilter(filterString, FilterList.class);
  ArrayList children = (ArrayList) outer.getFilters();
  assertTrue(children.get(0) instanceof FilterList);
  assertTrue(children.get(1) instanceof KeyOnlyFilter);
  FilterList andList = (FilterList) children.get(0);
  ArrayList andChildren = (ArrayList) andList.getFilters();
  assertTrue(andChildren.get(0) instanceof PrefixFilter);
  assertTrue(andChildren.get(1) instanceof FirstKeyOnlyFilter);
  PrefixFilter prefixFilter = (PrefixFilter) andChildren.get(0);
  assertEquals(new String(prefixFilter.getPrefix()), "realtime");
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testCompoundFilter1() throws IOException {
  // "(Prefix AND FirstKeyOnly)" parses into a two-element FilterList.
  String filterString = " (PrefixFilter ('realtime')AND FirstKeyOnlyFilter())";
  FilterList filterList = doTestFilter(filterString, FilterList.class);
  ArrayList filters = (ArrayList) filterList.getFilters();
  assertTrue(filters.get(0) instanceof PrefixFilter);
  assertTrue(filters.get(1) instanceof FirstKeyOnlyFilter);
  // Fix: the local was previously named "PrefixFilter", shadowing the type
  // name; also dropped the unused FirstKeyOnlyFilter cast (the instanceof
  // assertion above already checks the type).
  PrefixFilter prefixFilter = (PrefixFilter) filters.get(0);
  byte[] prefix = prefixFilter.getPrefix();
  assertEquals(new String(prefix), "realtime");
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testInclusiveStopFilter() throws IOException {
  // A simple quoted row key becomes the inclusive stop row.
  InclusiveStopFilter parsed =
      doTestFilter("InclusiveStopFilter ('row 3')", InclusiveStopFilter.class);
  assertEquals(new String(parsed.getStopRowKey()), "row 3");
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testFamilyFilter() throws IOException {
  // Parse a FamilyFilter with a binaryprefix comparator and verify each part.
  FamilyFilter parsed =
      doTestFilter("FamilyFilter(>=, 'binaryprefix:pre')", FamilyFilter.class);
  assertEquals(CompareFilter.CompareOp.GREATER_OR_EQUAL, parsed.getOperator());
  assertTrue(parsed.getComparator() instanceof BinaryPrefixComparator);
  BinaryPrefixComparator comparator = (BinaryPrefixComparator) parsed.getComparator();
  assertEquals("pre", new String(comparator.getValue()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testCompoundFilter2() throws IOException {
  // ((Prefix AND Qualifier) OR Family): the outer list is MUST_PASS_ONE and
  // the inner list MUST_PASS_ALL.
  String filterString = "(PrefixFilter('realtime') AND QualifierFilter (>=, 'binary:e'))"
      + "OR FamilyFilter (=, 'binary:qualifier') ";
  FilterList outer = doTestFilter(filterString, FilterList.class);
  ArrayList outerChildren = (ArrayList) outer.getFilters();
  assertTrue(outerChildren.get(0) instanceof FilterList);
  assertTrue(outerChildren.get(1) instanceof FamilyFilter);
  assertEquals(outer.getOperator(), FilterList.Operator.MUST_PASS_ONE);
  FilterList inner = (FilterList) outerChildren.get(0);
  FamilyFilter familyFilter = (FamilyFilter) outerChildren.get(1);
  ArrayList innerChildren = (ArrayList) inner.getFilters();
  assertTrue(innerChildren.get(0) instanceof PrefixFilter);
  assertTrue(innerChildren.get(1) instanceof QualifierFilter);
  assertEquals(inner.getOperator(), FilterList.Operator.MUST_PASS_ALL);
  assertEquals(CompareFilter.CompareOp.EQUAL, familyFilter.getOperator());
  assertTrue(familyFilter.getComparator() instanceof BinaryComparator);
  BinaryComparator familyComparator = (BinaryComparator) familyFilter.getComparator();
  assertEquals("qualifier", new String(familyComparator.getValue()));
  PrefixFilter prefixFilter = (PrefixFilter) innerChildren.get(0);
  assertEquals(new String(prefixFilter.getPrefix()), "realtime");
  QualifierFilter qualifierFilter = (QualifierFilter) innerChildren.get(1);
  assertEquals(CompareFilter.CompareOp.GREATER_OR_EQUAL, qualifierFilter.getOperator());
  assertTrue(qualifierFilter.getComparator() instanceof BinaryComparator);
  BinaryComparator qualifierComparator = (BinaryComparator) qualifierFilter.getComparator();
  assertEquals("e", new String(qualifierComparator.getValue()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testSingleColumnValueExcludeFilter() throws IOException {
  // Four-argument form: filterIfMissing defaults to false, latestVersionOnly to true.
  String filterString =
      "SingleColumnValueExcludeFilter ('family', 'qualifier', <, 'binaryprefix:a')";
  SingleColumnValueExcludeFilter parsed =
      doTestFilter(filterString, SingleColumnValueExcludeFilter.class);
  assertEquals(parsed.getOperator(), CompareFilter.CompareOp.LESS);
  assertEquals("family", new String(parsed.getFamily()));
  assertEquals("qualifier", new String(parsed.getQualifier()));
  assertEquals(new String(parsed.getComparator().getValue()), "a");
  assertFalse(parsed.getFilterIfMissing());
  assertTrue(parsed.getLatestVersionOnly());
  // Six-argument form overrides both booleans.
  filterString = "SingleColumnValueExcludeFilter "
      + "('family', 'qualifier', <=, 'binaryprefix:a', true, false)";
  parsed = doTestFilter(filterString, SingleColumnValueExcludeFilter.class);
  assertEquals("family", new String(parsed.getFamily()));
  assertEquals("qualifier", new String(parsed.getQualifier()));
  assertEquals(parsed.getOperator(), CompareFilter.CompareOp.LESS_OR_EQUAL);
  assertTrue(parsed.getComparator() instanceof BinaryPrefixComparator);
  BinaryPrefixComparator comparator = (BinaryPrefixComparator) parsed.getComparator();
  assertEquals(new String(comparator.getValue()), "a");
  assertTrue(parsed.getFilterIfMissing());
  assertFalse(parsed.getLatestVersionOnly());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testColumnPaginationFilter() throws IOException {
  // Two int arguments parse into limit and offset.
  ColumnPaginationFilter parsed =
      doTestFilter("ColumnPaginationFilter(4, 6)", ColumnPaginationFilter.class);
  int limit = parsed.getLimit();
  assertEquals(limit, 4);
  int offset = parsed.getOffset();
  assertEquals(offset, 6);
  // A single argument is rejected.
  try {
    doTestFilter(" ColumnPaginationFilter('124')", ColumnPaginationFilter.class);
    assertTrue(false);
  } catch (IllegalArgumentException e) {
    System.out.println("ColumnPaginationFilter needs two arguments");
  }
  // A non-numeric argument is rejected.
  try {
    doTestFilter(" ColumnPaginationFilter('4' , '123a')", ColumnPaginationFilter.class);
    assertTrue(false);
  } catch (IllegalArgumentException e) {
    System.out.println("ColumnPaginationFilter needs two ints as arguments");
  }
  // Negative arguments are rejected.
  try {
    doTestFilter(" ColumnPaginationFilter('4' , '-123')", ColumnPaginationFilter.class);
    assertTrue(false);
  } catch (IllegalArgumentException e) {
    System.out.println("ColumnPaginationFilter arguments should not be negative");
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testDependentColumnFilter() throws IOException {
  // Five arguments: family, qualifier, dropDependentColumn, op, comparator.
  DependentColumnFilter parsed = doTestFilter(
      "DependentColumnFilter('family', 'qualifier', true, =, 'binary:abc')",
      DependentColumnFilter.class);
  assertEquals("family", new String(parsed.getFamily()));
  assertEquals("qualifier", new String(parsed.getQualifier()));
  assertTrue(parsed.getDropDependentColumn());
  assertEquals(CompareFilter.CompareOp.EQUAL, parsed.getOperator());
  assertTrue(parsed.getComparator() instanceof BinaryComparator);
  BinaryComparator comparator = (BinaryComparator) parsed.getComparator();
  assertEquals("abc", new String(comparator.getValue()));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testColumnPrefixFilter() throws IOException {
  // Surrounding whitespace in the expression is tolerated by the parser.
  ColumnPrefixFilter parsed =
      doTestFilter(" ColumnPrefixFilter('qualifier' ) ", ColumnPrefixFilter.class);
  assertEquals(new String(parsed.getPrefix()), "qualifier");
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testSkipFilter() throws IOException {
  // SKIP wraps the inner filter in a SkipFilter.
  SkipFilter parsed = doTestFilter("SKIP ValueFilter( =, 'binary:0')", SkipFilter.class);
  assertTrue(parsed.getFilter() instanceof ValueFilter);
  ValueFilter wrapped = (ValueFilter) parsed.getFilter();
  assertEquals(CompareFilter.CompareOp.EQUAL, wrapped.getOperator());
  assertTrue(wrapped.getComparator() instanceof BinaryComparator);
  BinaryComparator comparator = (BinaryComparator) wrapped.getComparator();
  assertEquals("0", new String(comparator.getValue()));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testUnescapedQuote2() throws IOException {
  // Each doubled single quote inside the argument unescapes to one quote.
  InclusiveStopFilter parsed =
      doTestFilter("InclusiveStopFilter ('row''3''')", InclusiveStopFilter.class);
  assertEquals(new String(parsed.getStopRowKey()), "row'3'");
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testMultipleColumnPrefixFilter() throws IOException {
  // Both quoted prefixes are parsed, in order.
  MultipleColumnPrefixFilter parsed = doTestFilter(
      " MultipleColumnPrefixFilter('qualifier1', 'qualifier2' ) ",
      MultipleColumnPrefixFilter.class);
  byte[][] prefixes = parsed.getPrefix();
  assertEquals(new String(prefixes[0]), "qualifier1");
  assertEquals(new String(prefixes[1]), "qualifier2");
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testSingleColumnValueFilter() throws IOException {
  // Six-argument form sets filterIfMissing and latestVersionOnly explicitly.
  String filterString = "SingleColumnValueFilter "
      + "('family', 'qualifier', >=, 'binary:a', true, false)";
  SingleColumnValueFilter parsed = doTestFilter(filterString, SingleColumnValueFilter.class);
  assertEquals("family", new String(parsed.getFamily()));
  assertEquals("qualifier", new String(parsed.getQualifier()));
  assertEquals(parsed.getOperator(), CompareFilter.CompareOp.GREATER_OR_EQUAL);
  assertTrue(parsed.getComparator() instanceof BinaryComparator);
  BinaryComparator binary = (BinaryComparator) parsed.getComparator();
  assertEquals(new String(binary.getValue()), "a");
  assertTrue(parsed.getFilterIfMissing());
  assertFalse(parsed.getLatestVersionOnly());
  // Four-argument form falls back to the defaults (false / true).
  filterString = "SingleColumnValueFilter ('family', 'qualifier', >, 'binaryprefix:a')";
  parsed = doTestFilter(filterString, SingleColumnValueFilter.class);
  assertEquals("family", new String(parsed.getFamily()));
  assertEquals("qualifier", new String(parsed.getQualifier()));
  assertEquals(parsed.getOperator(), CompareFilter.CompareOp.GREATER);
  assertTrue(parsed.getComparator() instanceof BinaryPrefixComparator);
  BinaryPrefixComparator binaryPrefix = (BinaryPrefixComparator) parsed.getComparator();
  assertEquals(new String(binaryPrefix.getValue()), "a");
  assertFalse(parsed.getFilterIfMissing());
  assertTrue(parsed.getLatestVersionOnly());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testWhileFilter() throws IOException {
  // WHILE wraps the inner filter in a WhileMatchFilter.
  WhileMatchFilter parsed =
      doTestFilter(" WHILE RowFilter ( !=, 'binary:row1')", WhileMatchFilter.class);
  assertTrue(parsed.getFilter() instanceof RowFilter);
  RowFilter wrapped = (RowFilter) parsed.getFilter();
  assertEquals(CompareFilter.CompareOp.NOT_EQUAL, wrapped.getOperator());
  assertTrue(wrapped.getComparator() instanceof BinaryComparator);
  BinaryComparator comparator = (BinaryComparator) wrapped.getComparator();
  assertEquals("row1", new String(comparator.getValue()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testPageFilter() throws IOException {
  // A numeric argument parses into the page size.
  PageFilter parsed = doTestFilter(" PageFilter(4)", PageFilter.class);
  long pageSize = parsed.getPageSize();
  assertEquals(pageSize, 4);
  // A quoted (string) argument is rejected.
  try {
    doTestFilter(" PageFilter('123')", PageFilter.class);
    assertTrue(false);
  } catch (IllegalArgumentException e) {
    System.out.println("PageFilter needs an int as an argument");
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testUnescapedQuote3() throws IOException {
  // An argument of four quotes is a single escaped quote character.
  InclusiveStopFilter parsed =
      doTestFilter(" InclusiveStopFilter ('''')", InclusiveStopFilter.class);
  assertEquals(new String(parsed.getStopRowKey()), "'");
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testColumnRangeFilter() throws IOException {
  // min column inclusive, max column exclusive.
  ColumnRangeFilter parsed =
      doTestFilter("ColumnRangeFilter('abc', true, 'xyz', false)", ColumnRangeFilter.class);
  assertEquals("abc", new String(parsed.getMinColumn()));
  assertEquals("xyz", new String(parsed.getMaxColumn()));
  assertTrue(parsed.isMinColumnInclusive());
  assertFalse(parsed.isMaxColumnInclusive());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testRowFilter() throws IOException {
  RowFilter parsed = doTestFilter("RowFilter ( =, 'binary:regionse')", RowFilter.class);
  assertEquals(CompareFilter.CompareOp.EQUAL, parsed.getOperator());
  assertTrue(parsed.getComparator() instanceof BinaryComparator);
  BinaryComparator comparator = (BinaryComparator) parsed.getComparator();
  assertEquals("regionse", new String(comparator.getValue()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testColumnCountGetFilter() throws IOException {
  // A valid int argument becomes the limit.
  ColumnCountGetFilter parsed =
      doTestFilter(" ColumnCountGetFilter(4)", ColumnCountGetFilter.class);
  int limit = parsed.getLimit();
  assertEquals(limit, 4);
  // A non-numeric argument is rejected.
  try {
    doTestFilter(" ColumnCountGetFilter('abc')", ColumnCountGetFilter.class);
    assertTrue(false);
  } catch (IllegalArgumentException e) {
    System.out.println(e.getMessage());
  }
  // A value beyond Integer.MAX_VALUE is rejected.
  try {
    doTestFilter(" ColumnCountGetFilter(2147483648)", ColumnCountGetFilter.class);
    assertTrue(false);
  } catch (IllegalArgumentException e) {
    System.out.println(e.getMessage());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testValueFilter() throws IOException {
  ValueFilter parsed = doTestFilter("ValueFilter(!=, 'substring:pre')", ValueFilter.class);
  assertEquals(CompareFilter.CompareOp.NOT_EQUAL, parsed.getOperator());
  assertTrue(parsed.getComparator() instanceof SubstringComparator);
  SubstringComparator comparator = (SubstringComparator) parsed.getComparator();
  assertEquals("pre", new String(comparator.getValue()));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testUnescapedQuote1() throws IOException {
  // A doubled quote inside the argument unescapes to one quote.
  InclusiveStopFilter parsed =
      doTestFilter("InclusiveStopFilter ('row''3')", InclusiveStopFilter.class);
  assertEquals(new String(parsed.getStopRowKey()), "row'3");
}

Class: org.apache.hadoop.hbase.filter.TestRegexComparator

InternalCallVerifier BooleanVerifier 
@Test
public void testSerialization() throws Exception {
  // Default engine (Java regex) survives a serialization round trip.
  RegexStringComparator original = new RegexStringComparator("a|b");
  RegexStringComparator restored = RegexStringComparator.parseFrom(original.toByteArray());
  assertTrue(original.areSerializedFieldsEqual(restored));
  assertTrue(restored.getEngine() instanceof RegexStringComparator.JavaRegexEngine);
  // The Joni engine round-trips as well, keeping its engine type.
  original = new RegexStringComparator("a|b", EngineType.JONI);
  restored = RegexStringComparator.parseFrom(original.toByteArray());
  assertTrue(original.areSerializedFieldsEqual(restored));
  assertTrue(restored.getEngine() instanceof RegexStringComparator.JoniRegexEngine);
}

Class: org.apache.hadoop.hbase.filter.TestSingleColumnValueExcludeFilter

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the overridden filterKeyValue/filterRowCells behavior of
 * SingleColumnValueExcludeFilter: the tested column is stripped from matching
 * rows, and a mismatch rejects the remainder of the row.
 */
@Test
public void testFilterKeyValue() throws Exception {
  Filter filter = new SingleColumnValueExcludeFilter(COLUMN_FAMILY, COLUMN_QUALIFIER,
      CompareOp.EQUAL, VAL_1);
  KeyValue otherColumnKv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1);
  List kvs = new ArrayList();
  kvs.add(new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1));
  kvs.add(new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1));
  kvs.add(new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1));
  filter.filterRowCells(kvs);
  // The matched (tested) column is removed; the two sibling cells remain.
  assertEquals("resultSize", kvs.size(), 2);
  assertTrue("leftKV1", CellComparator.COMPARATOR.compare(kvs.get(0), otherColumnKv) == 0);
  assertTrue("leftKV2", CellComparator.COMPARATOR.compare(kvs.get(1), otherColumnKv) == 0);
  assertFalse("allRemainingWhenMatch", filter.filterAllRemaining());
  // After reset, feed cells one at a time: once the tested column mismatches,
  // the rest of the row is rejected (NEXT_ROW), including later cells.
  filter.reset();
  KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1);
  assertTrue("otherColumn", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
  kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2);
  assertTrue("testedMismatch", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW);
  kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1);
  assertTrue("otherColumn", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW);
}

Class: org.apache.hadoop.hbase.filter.TestSingleColumnValueFilter

APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier 
@Test
public void testLongComparator() throws IOException {
  // GREATER(100): values <= 100 yield NEXT_ROW, values > 100 yield INCLUDE.
  // Each value is checked twice: as a KeyValue and as a ByteBuffer-backed cell.
  Filter filter = new SingleColumnValueFilter(COLUMN_FAMILY, COLUMN_QUALIFIER,
      CompareOp.GREATER, new LongComparator(100L));

  KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(1L));
  assertTrue("less than", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW);
  filter.reset();
  byte[] buffer = kv.getBuffer();
  Cell cell = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
  assertTrue("less than", filter.filterKeyValue(cell) == Filter.ReturnCode.NEXT_ROW);
  filter.reset();

  kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(100L));
  assertTrue("Equals 100", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW);
  filter.reset();
  buffer = kv.getBuffer();
  cell = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
  assertTrue("Equals 100", filter.filterKeyValue(cell) == Filter.ReturnCode.NEXT_ROW);
  filter.reset();

  kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(120L));
  assertTrue("include 120", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
  filter.reset();
  buffer = kv.getBuffer();
  cell = new ByteBufferedCellImpl(ByteBuffer.wrap(buffer), 0, buffer.length);
  assertTrue("include 120", filter.filterKeyValue(cell) == Filter.ReturnCode.INCLUDE);
}

Class: org.apache.hadoop.hbase.fs.TestBlockReorder

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier 
/**
 * Test that the block-placement hook works within a live HBase mini cluster,
 * including when the WAL spans multiple HDFS blocks.
 *
 * Outline: start a one-node mini cluster, add a new datanode on the same host
 * as the region server, then repeatedly (until 10 successes) roll the WAL,
 * write a row, and inspect every WAL file's block locations, asserting that
 * the region server's own host appears only as the LAST replica (the reorder
 * hook pushes the local replica to the end so recovery reads come from
 * another node first).
 *
 * NOTE(review): the warning string below appears to have been split across
 * two lines by source extraction ("to roll. / If later ..."); confirm against
 * the original file before compiling.
 */
@Test() public void testHBaseCluster() throws Exception { byte[] sb="sb".getBytes(); htu.startMiniZKCluster(); MiniHBaseCluster hbm=htu.startMiniHBaseCluster(1,1); hbm.waitForActiveAndReadyMaster(); HRegionServer targetRs=hbm.getMaster(); String host4=targetRs.getServerName().getHostname(); LOG.info("Starting a new datanode with the name=" + host4); cluster.startDataNodes(conf,1,true,null,new String[]{"/r4"},new String[]{host4},null); cluster.waitClusterUp(); final int repCount=3; conf=targetRs.getConfiguration(); HFileSystem rfs=(HFileSystem)targetRs.getFileSystem(); Table h=htu.createTable(TableName.valueOf("table"),sb); String rootDir=new Path(FSUtils.getRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME+ "/"+ targetRs.getServerName().toString()).toUri().getPath(); DistributedFileSystem mdfs=(DistributedFileSystem)hbm.getMaster().getMasterFileSystem().getFileSystem(); int nbTest=0; while (nbTest < 10) { final List regions=targetRs.getOnlineRegions(h.getName()); final CountDownLatch latch=new CountDownLatch(regions.size()); final WALActionsListener listener=new WALActionsListener.Base(){ @Override public void postLogRoll( final Path oldPath, final Path newPath) throws IOException { latch.countDown(); } } ; for ( Region region : regions) { ((HRegion)region).getWAL().registerWALActionsListener(listener); } htu.getHBaseAdmin().rollWALWriter(targetRs.getServerName()); try { latch.await(); } catch ( InterruptedException exception) { LOG.warn("Interrupted while waiting for the wal of '" + targetRs + "' to roll. 
If later "+ "tests fail, it's probably because we should still be waiting."); Thread.currentThread().interrupt(); } for ( Region region : regions) { ((HRegion)region).getWAL().unregisterWALActionsListener(listener); } Thread.sleep(100); Put p=new Put(sb); p.addColumn(sb,sb,sb); h.put(p); DirectoryListing dl=dfs.getClient().listPaths(rootDir,HdfsFileStatus.EMPTY_NAME); HdfsFileStatus[] hfs=dl.getPartialListing(); Assert.assertTrue(hfs.length >= 1); for ( HdfsFileStatus hf : hfs) { try { LOG.info("Log file found: " + hf.getLocalName() + " in "+ rootDir); String logFile=rootDir + "/" + hf.getLocalName(); FileStatus fsLog=rfs.getFileStatus(new Path(logFile)); LOG.info("Checking log file: " + logFile); BlockLocation[] bls=rfs.getFileBlockLocations(fsLog,0,1); if (bls.length > 0) { BlockLocation bl=bls[0]; LOG.info(bl.getHosts().length + " replicas for block 0 in " + logFile+ " "); for (int i=0; i < bl.getHosts().length - 1; i++) { LOG.info(bl.getHosts()[i] + " " + logFile); Assert.assertNotSame(bl.getHosts()[i],host4); } String last=bl.getHosts()[bl.getHosts().length - 1]; LOG.info(last + " " + logFile); if (host4.equals(last)) { nbTest++; LOG.info(logFile + " is on the new datanode and is ok"); if (bl.getHosts().length == 3) { testFromDFS(dfs,logFile,repCount,host4); testFromDFS(mdfs,logFile,repCount,host4); } } } } catch ( FileNotFoundException exception) { LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was "+ "archived out from under us so we'll ignore and retry. If this test hangs "+ "indefinitely you should treat this failure as a symptom.",exception); } catch ( RemoteException exception) { if (exception.unwrapRemoteException() instanceof FileNotFoundException) { LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was "+ "archived out from under us so we'll ignore and retry. If this test hangs "+ "indefinitely you should treat this failure as a symptom.",exception); } else { throw exception; } } } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test that the reorder algorithm works as expected: non-WAL paths are left
 * untouched, while for a WAL path the owning server's host is moved to the
 * last replica position, idempotently.
 */
@Test
public void testBlockLocation() throws Exception {
  htu.startMiniZKCluster();
  MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
  conf = hbm.getConfiguration();

  // Write a tiny file with three replicas.
  final String fileName = "/helloWorld";
  Path p = new Path(fileName);
  final int repCount = 3;
  Assert.assertTrue((short) cluster.getDataNodes().size() >= repCount);
  FSDataOutputStream fop = dfs.create(p, (short) repCount);
  final double toWrite = 875.5613;
  fop.writeDouble(toWrite);
  fop.close();

  for (int i = 0; i < 10; i++) {
    LocatedBlocks l;
    final long max = System.currentTimeMillis() + 10000;
    // Poll the namenode until all replicas are reported (bounded to ~10s).
    do {
      l = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1);
      Assert.assertNotNull(l.getLocatedBlocks());
      Assert.assertEquals(l.getLocatedBlocks().size(), 1);
      Assert.assertTrue("Expecting " + repCount + " , got " + l.get(0).getLocations().length,
          System.currentTimeMillis() < max);
    } while (l.get(0).getLocations().length != repCount);

    // A plain file name is not a WAL: the replica order must be untouched.
    Object[] originalList = l.getLocatedBlocks().toArray();
    HFileSystem.ReorderWALBlocks lrb = new HFileSystem.ReorderWALBlocks();
    lrb.reorderBlocks(conf, l, fileName);
    Assert.assertArrayEquals(originalList, l.getLocatedBlocks().toArray());

    Assert.assertNotNull(conf.get(HConstants.HBASE_DIR));
    Assert.assertFalse(conf.get(HConstants.HBASE_DIR).isEmpty());
    // Build a path that looks like a WAL owned by host1.
    String pseudoLogFile = conf.get(HConstants.HBASE_DIR) + "/" + HConstants.HREGION_LOGDIR_NAME
        + "/" + host1 + ",6977,6576" + "/mylogfile";
    Assert.assertNotNull("log= " + pseudoLogFile,
        DefaultWALProvider.getServerNameFromWALDirectoryName(dfs.getConf(), pseudoLogFile));

    // host1 must be pushed to the last replica slot, and applying the reorder
    // a second time must not change anything.
    lrb.reorderBlocks(conf, l, pseudoLogFile);
    Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());
    lrb.reorderBlocks(conf, l, pseudoLogFile);
    Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());
  }
}

IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Test that we can add a reorder hook and that the hook takes effect when
 * reading the file through HDFS.
 *
 * Outline: write a small file with two replicas, kill the datanode holding
 * the first replica, install a ReorderBlocks hook that swaps the dead host
 * out of the first position, bind dummy sockets on the dead node's ports so
 * nothing comes back up on them, and assert that repeated reads still finish
 * quickly (i.e. they do not stall on the dead first replica).
 *
 * NOTE(review): the "HFileSystem bad host, inverting" log string below
 * appears to have been split across two lines by source extraction; confirm
 * against the original file before compiling.
 */
@Test public void testBlockLocationReorder() throws Exception { Path p=new Path("hello"); Assert.assertTrue((short)cluster.getDataNodes().size() > 1); final int repCount=2; FSDataOutputStream fop=dfs.create(p,(short)repCount); final double toWrite=875.5613; fop.writeDouble(toWrite); fop.close(); long start=System.currentTimeMillis(); FSDataInputStream fin=dfs.open(p); Assert.assertTrue(toWrite == fin.readDouble()); long end=System.currentTimeMillis(); LOG.info("readtime= " + (end - start)); fin.close(); Assert.assertTrue((end - start) < 30 * 1000); FileStatus f=dfs.getFileStatus(p); BlockLocation[] lbs; do { lbs=dfs.getFileBlockLocations(f,0,1); } while (lbs.length != 1 && lbs[0].getLength() != repCount); final String name=lbs[0].getNames()[0]; Assert.assertTrue(name.indexOf(':') > 0); String portS=name.substring(name.indexOf(':') + 1); final int port=Integer.parseInt(portS); LOG.info("port= " + port); int ipcPort=-1; boolean ok=false; final String lookup=lbs[0].getHosts()[0]; StringBuilder sb=new StringBuilder(); for ( DataNode dn : cluster.getDataNodes()) { final String dnName=getHostName(dn); sb.append(dnName).append(' '); if (lookup.equals(dnName)) { ok=true; LOG.info("killing datanode " + name + " / "+ lookup); ipcPort=dn.ipcServer.getListenerAddress().getPort(); dn.shutdown(); LOG.info("killed datanode " + name + " / "+ lookup); break; } } Assert.assertTrue("didn't find the server to kill, was looking for " + lookup + " found "+ sb,ok); LOG.info("ipc port= " + ipcPort); Assert.assertTrue(HFileSystem.addLocationsOrderInterceptor(conf,new HFileSystem.ReorderBlocks(){ @Override public void reorderBlocks( Configuration c, LocatedBlocks lbs, String src){ for ( LocatedBlock lb : lbs.getLocatedBlocks()) { if (lb.getLocations().length > 1) { DatanodeInfo[] infos=lb.getLocations(); if (infos[0].getHostName().equals(lookup)) { LOG.info("HFileSystem bad host, 
inverting"); DatanodeInfo tmp=infos[0]; infos[0]=infos[1]; infos[1]=tmp; } } } } } )); final int retries=10; ServerSocket ss=null; ServerSocket ssI; try { ss=new ServerSocket(port); ssI=new ServerSocket(ipcPort); } catch ( BindException be) { LOG.warn("Got bind exception trying to set up socket on " + port + " or "+ ipcPort+ ", this means that the datanode has not closed the socket or"+ " someone else took it. It may happen, skipping this test for this time.",be); if (ss != null) { ss.close(); } return; } for (int i=0; i < retries; i++) { start=System.currentTimeMillis(); fin=dfs.open(p); Assert.assertTrue(toWrite == fin.readDouble()); fin.close(); end=System.currentTimeMillis(); LOG.info("HFileSystem readtime= " + (end - start)); Assert.assertFalse("We took too much time to read",(end - start) > 60000); } ss.close(); ssI.close(); }

Class: org.apache.hadoop.hbase.http.TestHtmlQuoting

InternalCallVerifier EqualityVerifier 
@Test public void testRequestQuoting() throws Exception { HttpServletRequest mockReq=Mockito.mock(HttpServletRequest.class); HttpServer.QuotingInputFilter.RequestQuoter quoter=new HttpServer.QuotingInputFilter.RequestQuoter(mockReq); Mockito.doReturn("a

Class: org.apache.hadoop.hbase.http.TestHttpRequestLogAppender

InternalCallVerifier EqualityVerifier 
@Test public void testParameterPropagation(){ HttpRequestLogAppender requestLogAppender=new HttpRequestLogAppender(); requestLogAppender.setFilename("jetty-namenode-yyyy_mm_dd.log"); requestLogAppender.setRetainDays(17); assertEquals("Filename mismatch","jetty-namenode-yyyy_mm_dd.log",requestLogAppender.getFilename()); assertEquals("Retain days mismatch",17,requestLogAppender.getRetainDays()); }

Class: org.apache.hadoop.hbase.http.TestHttpServer

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testBindAddress() throws Exception { checkBindAddress("localhost",0,false).stop(); HttpServer myServer=checkBindAddress("localhost",0,false); HttpServer myServer2=null; try { int port=myServer.getConnectorAddress(0).getPort(); myServer2=checkBindAddress("localhost",port,true); port=myServer2.getConnectorAddress(0).getPort(); myServer2.stop(); assertNull(myServer2.getConnectorAddress(0)); myServer2.openListeners(); assertEquals(port,myServer2.getConnectorAddress(0).getPort()); } finally { myServer.stop(); if (myServer2 != null) { myServer2.stop(); } } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testXFrameHeaderSameOrigin() throws Exception { Configuration conf=new Configuration(); conf.set("hbase.http.filter.xframeoptions.mode","SAMEORIGIN"); HttpServer myServer=new HttpServer.Builder().setName("test").addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE,conf); myServer.addServlet("echo","/echo",EchoServlet.class); myServer.start(); String serverURL="http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)); URL url=new URL(new URL(serverURL),"/echo?a=b&c=d"); HttpURLConnection conn=(HttpURLConnection)url.openConnection(); assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); assertEquals("SAMEORIGIN",conn.getHeaderField("X-Frame-Options")); myServer.stop(); }

InternalCallVerifier BooleanVerifier 
@Test public void testHasAdministratorAccess() throws Exception { Configuration conf=new Configuration(); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,false); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(null); HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getRemoteUser()).thenReturn(null); HttpServletResponse response=Mockito.mock(HttpServletResponse.class); Assert.assertTrue(HttpServer.hasAdministratorAccess(context,request,response)); response=Mockito.mock(HttpServletResponse.class); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,true); Assert.assertFalse(HttpServer.hasAdministratorAccess(context,request,response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),Mockito.anyString()); response=Mockito.mock(HttpServletResponse.class); Mockito.when(request.getRemoteUser()).thenReturn("foo"); Assert.assertTrue(HttpServer.hasAdministratorAccess(context,request,response)); response=Mockito.mock(HttpServletResponse.class); AccessControlList acls=Mockito.mock(AccessControlList.class); Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.hasAdministratorAccess(context,request,response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),Mockito.anyString()); response=Mockito.mock(HttpServletResponse.class); Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(true); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertTrue(HttpServer.hasAdministratorAccess(context,request,response)); }

InternalCallVerifier EqualityVerifier HybridVerifier IgnoredMethod 
/** * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics * servlets, when authentication filters are set, but authorization is not * enabled. * @throws Exception */ @Test @Ignore public void testDisabledAuthorizationOfDefaultServlets() throws Exception { Configuration conf=new Configuration(); conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY,DummyFilterInitializer.class.getName()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MyGroupsProvider.class.getName()); Groups.getUserToGroupsMappingService(conf); MyGroupsProvider.clearMapping(); MyGroupsProvider.mapping.put("userA",Arrays.asList("groupA")); MyGroupsProvider.mapping.put("userB",Arrays.asList("groupB")); HttpServer myServer=new HttpServer.Builder().setName("test").addEndpoint(new URI("http://localhost:0")).setFindPort(true).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE,conf); myServer.start(); String serverURL="http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for ( String servlet : new String[]{"conf","logs","stacks","logLevel","metrics"}) { for ( String user : new String[]{"userA","userB"}) { assertEquals(HttpURLConnection.HTTP_OK,getHttpStatusCode(serverURL + servlet,user)); } } myServer.stop(); }

InternalCallVerifier BooleanVerifier 
@Test public void testRequestQuoterWithNotNull() throws Exception { HttpServletRequest request=Mockito.mock(HttpServletRequest.class); String[] values=new String[]{"abc","def"}; Mockito.doReturn(values).when(request).getParameterValues("dummy"); RequestQuoter requestQuoter=new RequestQuoter(request); String[] parameterValues=requestQuoter.getParameterValues("dummy"); Assert.assertTrue("It should return Parameter Values",Arrays.equals(values,parameterValues)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier IgnoredMethod 
/** * Verify the administrator access for /logs, /stacks, /conf, /logLevel and * /metrics servlets. * @throws Exception */ @Test @Ignore public void testAuthorizationOfDefaultServlets() throws Exception { Configuration conf=new Configuration(); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,true); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,true); conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY,DummyFilterInitializer.class.getName()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MyGroupsProvider.class.getName()); Groups.getUserToGroupsMappingService(conf); MyGroupsProvider.clearMapping(); MyGroupsProvider.mapping.put("userA",Arrays.asList("groupA")); MyGroupsProvider.mapping.put("userB",Arrays.asList("groupB")); MyGroupsProvider.mapping.put("userC",Arrays.asList("groupC")); MyGroupsProvider.mapping.put("userD",Arrays.asList("groupD")); MyGroupsProvider.mapping.put("userE",Arrays.asList("groupE")); HttpServer myServer=new HttpServer.Builder().setName("test").addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf).setACL(new AccessControlList("userA,userB groupC,groupD")).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE,conf); myServer.start(); String serverURL="http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for ( String servlet : new String[]{"conf","logs","stacks","logLevel","metrics"}) { for ( String user : new String[]{"userA","userB","userC","userD"}) { assertEquals(HttpURLConnection.HTTP_OK,getHttpStatusCode(serverURL + servlet,user)); } assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,getHttpStatusCode(serverURL + servlet,"userE")); } myServer.stop(); }

InternalCallVerifier BooleanVerifier 
@Test public void testRequiresAuthorizationAccess() throws Exception { Configuration conf=new Configuration(); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf); HttpServletRequest request=Mockito.mock(HttpServletRequest.class); HttpServletResponse response=Mockito.mock(HttpServletResponse.class); Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context,request,response)); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,true); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,true); AccessControlList acls=Mockito.mock(AccessControlList.class); Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context,request,response)); }

InternalCallVerifier EqualityVerifier 
@Test public void testRequestQuoterWithNull() throws Exception { HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.doReturn(null).when(request).getParameterValues("dummy"); RequestQuoter requestQuoter=new RequestQuoter(request); String[] parameterValues=requestQuoter.getParameterValues("dummy"); Assert.assertEquals("It should return null " + "when there are no values for the parameter",null,parameterValues); }

Class: org.apache.hadoop.hbase.http.TestHttpServerLifecycle

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier IgnoredMethod 
/** * Test that the server is alive once started * @throws Throwableon failure */ @Ignore("Hangs on occasion; see HBASE-14430") @Test(timeout=60000) public void testWepAppContextAfterServerStop() throws Throwable { HttpServer server=null; String key="test.attribute.key"; String value="test.attribute.value"; server=createTestServer(); assertNotLive(server); server.start(); server.setAttribute(key,value); assertAlive(server); assertEquals(value,server.getAttribute(key)); stop(server); assertNull("Server context should have cleared",server.getAttribute(key)); }

Class: org.apache.hadoop.hbase.http.TestHttpServerWebapps

UtilityVerifier InternalCallVerifier 
/** * Test that an invalid webapp triggers an exception * @throws Throwable if something went wrong */ @Test public void testMissingServerResource() throws Throwable { try { HttpServer server=createServer("NoSuchWebapp"); String serverDescription=server.toString(); stop(server); fail("Expected an exception, got " + serverDescription); } catch ( FileNotFoundException expected) { log.debug("Expected exception " + expected,expected); } }

Class: org.apache.hadoop.hbase.http.conf.TestConfServlet

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testWriteXml() throws Exception { StringWriter sw=new StringWriter(); ConfServlet.writeResponse(getTestConf(),sw,"xml"); String xml=sw.toString(); DocumentBuilderFactory docBuilderFactory=DocumentBuilderFactory.newInstance(); DocumentBuilder builder=docBuilderFactory.newDocumentBuilder(); Document doc=builder.parse(new InputSource(new StringReader(xml))); NodeList nameNodes=doc.getElementsByTagName("name"); boolean foundSetting=false; for (int i=0; i < nameNodes.getLength(); i++) { Node nameNode=nameNodes.item(i); String key=nameNode.getTextContent(); System.err.println("xml key: " + key); if (TEST_KEY.equals(key)) { foundSetting=true; Element propertyElem=(Element)nameNode.getParentNode(); String val=propertyElem.getElementsByTagName("value").item(0).getTextContent(); assertEquals(TEST_VAL,val); } } assertTrue(foundSetting); }

Class: org.apache.hadoop.hbase.http.lib.TestStaticUserWebFilter

InternalCallVerifier EqualityVerifier 
@Test public void testFilter() throws Exception { FilterConfig config=mockConfig("myuser"); StaticUserFilter suf=new StaticUserFilter(); suf.init(config); ArgumentCaptor wrapperArg=ArgumentCaptor.forClass(HttpServletRequestWrapper.class); FilterChain chain=mock(FilterChain.class); suf.doFilter(mock(HttpServletRequest.class),mock(ServletResponse.class),chain); Mockito.verify(chain).doFilter(wrapperArg.capture(),Mockito.anyObject()); HttpServletRequestWrapper wrapper=wrapperArg.getValue(); assertEquals("myuser",wrapper.getUserPrincipal().getName()); assertEquals("myuser",wrapper.getRemoteUser()); suf.destroy(); }

InternalCallVerifier EqualityVerifier 
@Test public void testConfiguration(){ Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER,"dr.stack"); assertEquals("dr.stack",StaticUserWebFilter.getUsernameFromConf(conf)); }

InternalCallVerifier EqualityVerifier 
@Test public void testOldStyleConfiguration(){ Configuration conf=new Configuration(); conf.set("dfs.web.ugi","joe,group1,group2"); assertEquals("joe",StaticUserWebFilter.getUsernameFromConf(conf)); }

Class: org.apache.hadoop.hbase.http.log.TestLogLevel

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier 
@Test(timeout=60000) @SuppressWarnings("deprecation") public void testDynamicLogLevel() throws Exception { String logName=TestLogLevel.class.getName(); Log testlog=LogFactory.getLog(logName); if (testlog instanceof Log4JLogger) { Logger log=((Log4JLogger)testlog).getLogger(); log.debug("log.debug1"); log.info("log.info1"); log.error("log.error1"); assertTrue(!Level.ERROR.equals(log.getEffectiveLevel())); HttpServer server=null; try { server=new HttpServer.Builder().setName("..").addEndpoint(new URI("http://localhost:0")).setFindPort(true).build(); server.start(); String authority=NetUtils.getHostPortString(server.getConnectorAddress(0)); URL url=new URL("http://" + authority + "/logLevel?log="+ logName+ "&level="+ Level.ERROR); out.println("*** Connecting to " + url); HttpURLConnection connection=(HttpURLConnection)url.openConnection(); connection.connect(); BufferedReader in=new BufferedReader(new InputStreamReader(connection.getInputStream())); for (String line; (line=in.readLine()) != null; out.println(line)) ; in.close(); connection.disconnect(); log.debug("log.debug2"); log.info("log.info2"); log.error("log.error2"); assertTrue(Level.ERROR.equals(log.getEffectiveLevel())); String[] args={"-setlevel",authority,logName,Level.DEBUG.toString()}; LogLevel.main(args); log.debug("log.debug3"); log.info("log.info3"); log.error("log.error3"); assertTrue(Level.DEBUG.equals(log.getEffectiveLevel())); } finally { if (server != null) { server.stop(); } } } else { out.println(testlog.getClass() + " not tested."); } }

Class: org.apache.hadoop.hbase.io.TestBoundedByteBufferPool

InternalCallVerifier EqualityVerifier 
@Test public void testBufferSizeGrowWithMultiThread() throws Exception { final ConcurrentLinkedDeque bufferQueue=new ConcurrentLinkedDeque(); int takeBufferThreadsCount=30; int putBufferThreadsCount=1; Thread takeBufferThreads[]=new Thread[takeBufferThreadsCount]; for (int i=0; i < takeBufferThreadsCount; i++) { takeBufferThreads[i]=new Thread(new Runnable(){ @Override public void run(){ while (true) { ByteBuffer buffer=reservoir.getBuffer(); try { Thread.sleep(5); } catch ( InterruptedException e) { break; } bufferQueue.offer(buffer); if (Thread.currentThread().isInterrupted()) break; } } } ); } Thread putBufferThread[]=new Thread[putBufferThreadsCount]; for (int i=0; i < putBufferThreadsCount; i++) { putBufferThread[i]=new Thread(new Runnable(){ @Override public void run(){ while (true) { ByteBuffer buffer=bufferQueue.poll(); if (buffer != null) { reservoir.putBuffer(buffer); } if (Thread.currentThread().isInterrupted()) break; } } } ); } for (int i=0; i < takeBufferThreadsCount; i++) { takeBufferThreads[i].start(); } for (int i=0; i < putBufferThreadsCount; i++) { putBufferThread[i].start(); } Thread.sleep(2 * 1000); for (int i=0; i < takeBufferThreadsCount; i++) { takeBufferThreads[i].interrupt(); takeBufferThreads[i].join(); } for (int i=0; i < putBufferThreadsCount; i++) { putBufferThread[i].interrupt(); putBufferThread[i].join(); } assertEquals(initialByteBufferSize,this.reservoir.getRunningAverage()); }

InternalCallVerifier EqualityVerifier 
@Test public void testGetPut(){ ByteBuffer bb=this.reservoir.getBuffer(); assertEquals(initialByteBufferSize,bb.capacity()); assertEquals(0,this.reservoir.getQueueSize()); this.reservoir.putBuffer(bb); assertEquals(1,this.reservoir.getQueueSize()); this.reservoir.getBuffer(); assertEquals(0,this.reservoir.getQueueSize()); final int newCapacity=2; this.reservoir.putBuffer(ByteBuffer.allocate(newCapacity)); assertEquals(1,reservoir.getQueueSize()); this.reservoir.getBuffer(); assertEquals(0,this.reservoir.getQueueSize()); bb=this.reservoir.getBuffer(); assertEquals(newCapacity,bb.capacity()); assertEquals(0,this.reservoir.getQueueSize()); this.reservoir.putBuffer(ByteBuffer.allocate(maxByteBufferSizeToCache * 2)); assertEquals(0,this.reservoir.getQueueSize()); for (int i=0; i < maxToCache; i++) { this.reservoir.putBuffer(ByteBuffer.allocate(initialByteBufferSize)); } assertEquals(maxToCache,this.reservoir.getQueueSize()); }

Class: org.apache.hadoop.hbase.io.TestFileLink

InternalCallVerifier EqualityVerifier 
/** * Test, on HDFS, that the FileLink is still readable * even when the current file gets renamed. */ @Test public void testHDFSLinkReadDuringRename() throws Exception { HBaseTestingUtility testUtil=new HBaseTestingUtility(); Configuration conf=testUtil.getConfiguration(); conf.setInt("dfs.blocksize",1024 * 1024); conf.setInt("dfs.client.read.prefetch.size",2 * 1024 * 1024); testUtil.startMiniDFSCluster(1); MiniDFSCluster cluster=testUtil.getDFSCluster(); FileSystem fs=cluster.getFileSystem(); assertEquals("hdfs",fs.getUri().getScheme()); try { testLinkReadDuringRename(fs,testUtil.getDefaultRootDirPath()); } finally { testUtil.shutdownMiniCluster(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test that link is still readable even when the current file gets deleted. * NOTE: This test is valid only on HDFS. * When a file is deleted from a local file-system, it is simply 'unlinked'. * The inode, which contains the file's data, is not deleted until all * processes have finished with it. * In HDFS when the request exceed the cached block locations, * a query to the namenode is performed, using the filename, * and the deleted file doesn't exists anymore (FileNotFoundException). */ @Test public void testHDFSLinkReadDuringDelete() throws Exception { HBaseTestingUtility testUtil=new HBaseTestingUtility(); Configuration conf=testUtil.getConfiguration(); conf.setInt("dfs.blocksize",1024 * 1024); conf.setInt("dfs.client.read.prefetch.size",2 * 1024 * 1024); testUtil.startMiniDFSCluster(1); MiniDFSCluster cluster=testUtil.getDFSCluster(); FileSystem fs=cluster.getFileSystem(); assertEquals("hdfs",fs.getUri().getScheme()); try { List files=new ArrayList(); for (int i=0; i < 3; i++) { Path path=new Path(String.format("test-data-%d",i)); writeSomeData(fs,path,1 << 20,(byte)i); files.add(path); } FileLink link=new FileLink(files); FSDataInputStream in=link.open(fs); try { byte[] data=new byte[8192]; int n; n=in.read(data); dataVerify(data,n,(byte)0); fs.delete(files.get(0),true); skipBuffer(in,(byte)0); n=in.read(data); dataVerify(data,n,(byte)1); fs.delete(files.get(1),true); skipBuffer(in,(byte)1); n=in.read(data); dataVerify(data,n,(byte)2); fs.delete(files.get(2),true); skipBuffer(in,(byte)2); try { n=in.read(data); assert (n <= 0); } catch ( FileNotFoundException e) { assertTrue(true); } } finally { in.close(); } } finally { testUtil.shutdownMiniCluster(); } }

InternalCallVerifier EqualityVerifier 
/** * Test, on a local filesystem, that the FileLink is still readable * even when the current file gets renamed. */ @Test public void testLocalLinkReadDuringRename() throws IOException { HBaseTestingUtility testUtil=new HBaseTestingUtility(); FileSystem fs=testUtil.getTestFileSystem(); assertEquals("file",fs.getUri().getScheme()); testLinkReadDuringRename(fs,testUtil.getDataTestDir()); }

Class: org.apache.hadoop.hbase.io.TestHFileLink

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testBackReference(){ Path rootDir=new Path("/root"); Path archiveDir=new Path(rootDir,".archive"); String storeFileName="121212"; String linkDir=FileLink.BACK_REFERENCES_DIRECTORY_PREFIX + storeFileName; String encodedRegion="FEFE"; String cf="cf1"; TableName refTables[]={TableName.valueOf("refTable"),TableName.valueOf("ns","refTable")}; for ( TableName refTable : refTables) { Path refTableDir=FSUtils.getTableDir(archiveDir,refTable); Path refRegionDir=HRegion.getRegionDir(refTableDir,encodedRegion); Path refDir=new Path(refRegionDir,cf); Path refLinkDir=new Path(refDir,linkDir); String refStoreFileName=refTable.getNameAsString().replace(TableName.NAMESPACE_DELIM,'=') + "=" + encodedRegion+ "-"+ storeFileName; TableName tableNames[]={TableName.valueOf("tableName1"),TableName.valueOf("ns","tableName2")}; for ( TableName tableName : tableNames) { Path tableDir=FSUtils.getTableDir(rootDir,tableName); Path regionDir=HRegion.getRegionDir(tableDir,encodedRegion); Path cfDir=new Path(regionDir,cf); assertEquals(encodedRegion + "." + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM,'='),HFileLink.createBackReferenceName(tableName.getNameAsString(),encodedRegion)); Pair parsedRef=HFileLink.parseBackReferenceName(encodedRegion + "." + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM,'=')); assertEquals(parsedRef.getFirst(),tableName); assertEquals(parsedRef.getSecond(),encodedRegion); Path storeFileDir=new Path(refLinkDir,encodedRegion + "." + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM,'=')); Path linkPath=new Path(cfDir,refStoreFileName); assertEquals(linkPath,HFileLink.getHFileFromBackReference(rootDir,storeFileDir)); } } }

Class: org.apache.hadoop.hbase.io.TestHalfStoreFileReader

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testHalfScanner() throws IOException { String root_dir=TEST_UTIL.getDataTestDir().toString(); Path p=new Path(root_dir,"test"); Configuration conf=TEST_UTIL.getConfiguration(); FileSystem fs=FileSystem.get(conf); CacheConfig cacheConf=new CacheConfig(conf); HFileContext meta=new HFileContextBuilder().withBlockSize(1024).build(); HFile.Writer w=HFile.getWriterFactory(conf,cacheConf).withPath(fs,p).withFileContext(meta).create(); List items=genSomeKeys(); for ( KeyValue kv : items) { w.append(kv); } w.close(); HFile.Reader r=HFile.createReader(fs,p,cacheConf,conf); r.loadFileInfo(); Cell midKV=r.midkey(); byte[] midkey=CellUtil.cloneRow(midKV); Reference bottom=new Reference(midkey,Reference.Range.bottom); Reference top=new Reference(midkey,Reference.Range.top); KeyValue beforeMidKey=null; for ( KeyValue item : items) { if (CellComparator.COMPARATOR.compare(item,midKV) >= 0) { break; } beforeMidKey=item; } System.out.println("midkey: " + midKV + " or: "+ Bytes.toStringBinary(midkey)); System.out.println("beforeMidKey: " + beforeMidKey); Cell foundKeyValue=doTestOfSeekBefore(p,fs,bottom,midKV,cacheConf); assertEquals(beforeMidKey,foundKeyValue); foundKeyValue=doTestOfSeekBefore(p,fs,top,items.get(items.size() - 1),cacheConf); assertEquals(items.get(items.size() - 2),foundKeyValue); foundKeyValue=doTestOfSeekBefore(p,fs,bottom,items.get(items.size() - 1),cacheConf); assertEquals(beforeMidKey,foundKeyValue); foundKeyValue=doTestOfSeekBefore(p,fs,top,items.get(0),cacheConf); assertNull(foundKeyValue); foundKeyValue=doTestOfSeekBefore(p,fs,bottom,items.get(0),cacheConf); assertNull(foundKeyValue); foundKeyValue=doTestOfSeekBefore(p,fs,top,items.get(1),cacheConf); assertNull(foundKeyValue); foundKeyValue=doTestOfSeekBefore(p,fs,bottom,items.get(1),cacheConf); assertEquals(items.get(0),foundKeyValue); foundKeyValue=doTestOfSeekBefore(p,fs,top,midKV,cacheConf); assertNull(foundKeyValue); }

Class: org.apache.hadoop.hbase.io.TestHeapSize

APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Testing the classes that implement HeapSize and are a part of 0.20.
 * Some are not tested here, for example BlockIndex, which is tested in
 * TestHFile since it is a non-public class.
 *
 * Pattern for each class: compare the hand-maintained overhead constant (or
 * heapSize()) against ClassSize.estimateBase(); on mismatch, re-run the
 * estimate with debug=true to print the breakdown before failing.
 * @throws IOException
 */
@Test
public void testSizes() throws IOException {
  Class cl;
  long expected;
  long actual;
  // KeyValue: estimate vs. heapSize() of an empty instance.
  cl = KeyValue.class;
  expected = ClassSize.estimateBase(cl, false);
  KeyValue kv = new KeyValue();
  actual = kv.heapSize();
  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
  // LruBlockCache fixed overhead.
  cl = LruBlockCache.class;
  actual = LruBlockCache.CACHE_FIXED_OVERHEAD;
  expected = ClassSize.estimateBase(cl, false);
  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
  // LruCachedBlock: per-block overhead also accounts a String and a ByteBuffer.
  cl = LruCachedBlock.class;
  actual = LruCachedBlock.PER_BLOCK_OVERHEAD;
  expected = ClassSize.estimateBase(cl, false);
  expected += ClassSize.estimateBase(String.class, false);
  expected += ClassSize.estimateBase(ByteBuffer.class, false);
  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    ClassSize.estimateBase(String.class, true);
    ClassSize.estimateBase(ByteBuffer.class, true);
    assertEquals(expected, actual);
  }
  // DefaultMemStore shallow overhead.
  cl = DefaultMemStore.class;
  actual = DefaultMemStore.FIXED_OVERHEAD;
  expected = ClassSize.estimateBase(cl, false);
  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
  // DefaultMemStore deep overhead: adds an AtomicLong plus two each of
  // CellSkipListSet, ConcurrentSkipListMap and TimeRangeTracker.
  actual = DefaultMemStore.DEEP_OVERHEAD;
  expected = ClassSize.estimateBase(cl, false);
  expected += ClassSize.estimateBase(AtomicLong.class, false);
  expected += (2 * ClassSize.estimateBase(CellSkipListSet.class, false));
  expected += (2 * ClassSize.estimateBase(ConcurrentSkipListMap.class, false));
  expected += (2 * ClassSize.estimateBase(TimeRangeTracker.class, false));
  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    ClassSize.estimateBase(AtomicLong.class, true);
    ClassSize.estimateBase(CellSkipListSet.class, true);
    ClassSize.estimateBase(CellSkipListSet.class, true);
    ClassSize.estimateBase(ConcurrentSkipListMap.class, true);
    ClassSize.estimateBase(ConcurrentSkipListMap.class, true);
    ClassSize.estimateBase(TimeRangeTracker.class, true);
    ClassSize.estimateBase(TimeRangeTracker.class, true);
    assertEquals(expected, actual);
  }
  // HStore fixed overhead.
  cl = HStore.class;
  actual = HStore.FIXED_OVERHEAD;
  expected = ClassSize.estimateBase(cl, false);
  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
  // HRegion fixed overhead.
  cl = HRegion.class;
  actual = HRegion.FIXED_OVERHEAD;
  expected = ClassSize.estimateBase(cl, false);
  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
  // BlockCacheKey fixed overhead.
  cl = BlockCacheKey.class;
  actual = BlockCacheKey.FIXED_OVERHEAD;
  expected = ClassSize.estimateBase(cl, false);
  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}

Class: org.apache.hadoop.hbase.io.TestMultiByteBuffInputStream

APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
@Test public void testReads() throws Exception { ByteArrayOutputStream bos=new ByteArrayOutputStream(100); DataOutputStream dos=new DataOutputStream(bos); String s="test"; int i=128; dos.write(1); dos.writeInt(i); dos.writeBytes(s); dos.writeLong(12345L); dos.writeShort(2); dos.flush(); ByteBuffer bb=ByteBuffer.wrap(bos.toByteArray()); ByteBuffInputStream bbis=new ByteBuffInputStream(new MultiByteBuff(bb)); assertEquals(15 + s.length(),bbis.available()); assertEquals(1,bbis.read()); byte[] ib=new byte[4]; bbis.read(ib); assertEquals(i,Bytes.toInt(ib)); byte[] sb=new byte[s.length()]; bbis.read(sb); assertEquals(s,Bytes.toString(sb)); byte[] lb=new byte[8]; bbis.read(lb); assertEquals(12345,Bytes.toLong(lb)); assertEquals(2,bbis.available()); ib=new byte[4]; int read=bbis.read(ib,0,ib.length); assertEquals(2,read); assertEquals(2,Bytes.toShort(ib)); assertEquals(0,bbis.available()); assertEquals(-1,bbis.read()); bbis.close(); bb=ByteBuffer.wrap(bos.toByteArray()); bbis=new ByteBuffInputStream(new MultiByteBuff(bb)); DataInputStream dis=new DataInputStream(bbis); dis.read(); assertEquals(i,dis.readInt()); dis.close(); }

Class: org.apache.hadoop.hbase.io.TestTagCompressionContext

InternalCallVerifier BooleanVerifier 
@Test public void testCompressUncompressTags2() throws Exception { ByteArrayOutputStream baos=new ByteArrayOutputStream(); TagCompressionContext context=new TagCompressionContext(LRUDictionary.class,Byte.MAX_VALUE); KeyValue kv1=createKVWithTags(1); int tagsLength1=kv1.getTagsLength(); context.compressTags(baos,kv1.getTagsArray(),kv1.getTagsOffset(),tagsLength1); KeyValue kv2=createKVWithTags(3); int tagsLength2=kv2.getTagsLength(); context.compressTags(baos,kv2.getTagsArray(),kv2.getTagsOffset(),tagsLength2); context.clear(); ByteArrayInputStream bais=new ByteArrayInputStream(baos.getBuffer()); byte[] dest=new byte[tagsLength1]; context.uncompressTags(bais,dest,0,tagsLength1); assertTrue(Bytes.equals(kv1.getTagsArray(),kv1.getTagsOffset(),tagsLength1,dest,0,tagsLength1)); dest=new byte[tagsLength2]; context.uncompressTags(bais,dest,0,tagsLength2); assertTrue(Bytes.equals(kv2.getTagsArray(),kv2.getTagsOffset(),tagsLength2,dest,0,tagsLength2)); }

InternalCallVerifier BooleanVerifier 
@Test public void testCompressUncompressTags1() throws Exception { ByteArrayOutputStream baos=new ByteArrayOutputStream(); TagCompressionContext context=new TagCompressionContext(LRUDictionary.class,Byte.MAX_VALUE); KeyValue kv1=createKVWithTags(2); int tagsLength1=kv1.getTagsLength(); ByteBuffer ib=ByteBuffer.wrap(kv1.getTagsArray()); context.compressTags(baos,ib,kv1.getTagsOffset(),tagsLength1); KeyValue kv2=createKVWithTags(3); int tagsLength2=kv2.getTagsLength(); ib=ByteBuffer.wrap(kv2.getTagsArray()); context.compressTags(baos,ib,kv2.getTagsOffset(),tagsLength2); context.clear(); byte[] dest=new byte[tagsLength1]; ByteBuffer ob=ByteBuffer.wrap(baos.toByteArray()); context.uncompressTags(new SingleByteBuff(ob),dest,0,tagsLength1); assertTrue(Bytes.equals(kv1.getTagsArray(),kv1.getTagsOffset(),tagsLength1,dest,0,tagsLength1)); dest=new byte[tagsLength2]; context.uncompressTags(new SingleByteBuff(ob),dest,0,tagsLength2); assertTrue(Bytes.equals(kv2.getTagsArray(),kv2.getTagsOffset(),tagsLength2,dest,0,tagsLength2)); }

InternalCallVerifier BooleanVerifier 
@Test public void testCompressUncompressTagsWithOffheapKeyValue1() throws Exception { ByteArrayOutputStream baos=new ByteArrayOutputStream(); DataOutputStream daos=new ByteBufferSupportDataOutputStream(baos); TagCompressionContext context=new TagCompressionContext(LRUDictionary.class,Byte.MAX_VALUE); ByteBufferedCell kv1=(ByteBufferedCell)createOffheapKVWithTags(2); int tagsLength1=kv1.getTagsLength(); context.compressTags(daos,kv1.getTagsByteBuffer(),kv1.getTagsPosition(),tagsLength1); ByteBufferedCell kv2=(ByteBufferedCell)createOffheapKVWithTags(3); int tagsLength2=kv2.getTagsLength(); context.compressTags(daos,kv2.getTagsByteBuffer(),kv2.getTagsPosition(),tagsLength2); context.clear(); byte[] dest=new byte[tagsLength1]; ByteBuffer ob=ByteBuffer.wrap(baos.getBuffer()); context.uncompressTags(new SingleByteBuff(ob),dest,0,tagsLength1); assertTrue(Bytes.equals(kv1.getTagsArray(),kv1.getTagsOffset(),tagsLength1,dest,0,tagsLength1)); dest=new byte[tagsLength2]; context.uncompressTags(new SingleByteBuff(ob),dest,0,tagsLength2); assertTrue(Bytes.equals(kv2.getTagsArray(),kv2.getTagsOffset(),tagsLength2,dest,0,tagsLength2)); }

InternalCallVerifier BooleanVerifier 
/**
 * Same off-heap tag round-trip as the sibling test, but decompresses from a
 * plain ByteArrayInputStream instead of a ByteBuff.
 */
@Test
public void testCompressUncompressTagsWithOffheapKeyValue2() throws Exception {
  ByteArrayOutputStream rawOut = new ByteArrayOutputStream();
  DataOutputStream compressedOut = new ByteBufferSupportDataOutputStream(rawOut);
  TagCompressionContext ctx = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE);
  ByteBufferedCell first = (ByteBufferedCell) createOffheapKVWithTags(1);
  int firstTagsLen = first.getTagsLength();
  ctx.compressTags(compressedOut, first.getTagsByteBuffer(), first.getTagsPosition(), firstTagsLen);
  ByteBufferedCell second = (ByteBufferedCell) createOffheapKVWithTags(3);
  int secondTagsLen = second.getTagsLength();
  ctx.compressTags(compressedOut, second.getTagsByteBuffer(), second.getTagsPosition(), secondTagsLen);
  ctx.clear(); // reader-side starts with an empty dictionary
  ByteArrayInputStream compressedIn = new ByteArrayInputStream(rawOut.getBuffer());
  byte[] decompressed = new byte[firstTagsLen];
  ctx.uncompressTags(compressedIn, decompressed, 0, firstTagsLen);
  assertTrue(Bytes.equals(first.getTagsArray(), first.getTagsOffset(), firstTagsLen,
      decompressed, 0, firstTagsLen));
  decompressed = new byte[secondTagsLen];
  ctx.uncompressTags(compressedIn, decompressed, 0, secondTagsLen);
  assertTrue(Bytes.equals(second.getTagsArray(), second.getTagsOffset(), secondTagsLen,
      decompressed, 0, secondTagsLen));
}

Class: org.apache.hadoop.hbase.io.crypto.TestCipherProvider

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A CipherProvider configured via CRYPTO_CIPHERPROVIDER_CONF_KEY must be
 * instantiated and used by Encryption, and its "TEST" cipher must resolve
 * with the expected name and zero key length.
 */
@Test
public void testCustomProvider() {
  Configuration conf = HBaseConfiguration.create();
  conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, MyCipherProvider.class.getName());
  CipherProvider provider = Encryption.getCipherProvider(conf);
  assertTrue(provider instanceof MyCipherProvider);
  assertTrue(Arrays.asList(provider.getSupportedCiphers()).contains("TEST"));
  Cipher a = Encryption.getCipher(conf, "TEST");
  assertNotNull(a);
  assertTrue(a.getProvider() instanceof MyCipherProvider);
  // FIX: assertEquals takes (expected, actual); the original passed them
  // swapped, which produces misleading failure messages.
  assertEquals("TEST", a.getName());
  assertEquals(0, a.getKeyLength());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * With no provider configured, Encryption must fall back to
 * DefaultCipherProvider and supply the configured algorithm (AES by default)
 * with the expected key length.
 */
@Test
public void testDefaultProvider() {
  Configuration conf = HBaseConfiguration.create();
  CipherProvider provider = Encryption.getCipherProvider(conf);
  assertTrue(provider instanceof DefaultCipherProvider);
  String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  assertTrue(Arrays.asList(provider.getSupportedCiphers()).contains(algorithm));
  Cipher a = Encryption.getCipher(conf, algorithm);
  assertNotNull(a);
  assertTrue(a.getProvider() instanceof DefaultCipherProvider);
  // FIX: assertEquals arguments were swapped (expected must come first).
  assertEquals(algorithm, a.getName());
  assertEquals(AES.KEY_LENGTH, a.getKeyLength());
}

Class: org.apache.hadoop.hbase.io.crypto.TestKeyProvider

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A configured KeyProviderForTesting must be returned by Encryption and hand
 * back an AES key of the expected length for an arbitrary alias.
 */
@Test
public void testTestProvider() {
  Configuration conf = HBaseConfiguration.create();
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  KeyProvider provider = Encryption.getKeyProvider(conf);
  assertNotNull("Null returned for provider", provider);
  assertTrue("Provider is not the expected type", provider instanceof KeyProviderForTesting);
  Key key = provider.getKey("foo");
  assertNotNull("Test provider did not return a key as expected", key);
  // FIX: assertEquals(message, expected, actual) — the expected value goes
  // before the actual; the original had them swapped.
  assertEquals("Test provider did not create a key for AES", "AES", key.getAlgorithm());
  assertEquals("Test provider did not create a key of adequate length",
      AES.KEY_LENGTH, key.getEncoded().length);
}

Class: org.apache.hadoop.hbase.io.crypto.TestKeyStoreKeyProvider

APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * KeyStoreKeyProvider initialized with an inline password must resolve the
 * stored key byte-for-byte.
 */
@Test(timeout = 30000)
public void testKeyStoreKeyProviderWithPassword() throws Exception {
  KeyProvider provider = new KeyStoreKeyProvider();
  provider.init("jceks://" + storeFile.toURI().getPath() + "?password=" + PASSWORD);
  Key key = provider.getKey(ALIAS);
  assertNotNull(key);
  byte[] keyBytes = key.getEncoded();
  // FIX: assertEquals takes (expected, actual); the original passed the
  // actual first, both for the length and the per-byte comparison.
  assertEquals(KEY.length, keyBytes.length);
  for (int i = 0; i < KEY.length; i++) {
    assertEquals(KEY[i], keyBytes[i]);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * KeyStoreKeyProvider initialized with a URL-encoded password-file path must
 * resolve the stored key byte-for-byte.
 */
@Test(timeout = 30000)
public void testKeyStoreKeyProviderWithPasswordFile() throws Exception {
  KeyProvider provider = new KeyStoreKeyProvider();
  provider.init("jceks://" + storeFile.toURI().getPath() + "?passwordFile="
      + URLEncoder.encode(passwordFile.getAbsolutePath(), "UTF-8"));
  Key key = provider.getKey(ALIAS);
  assertNotNull(key);
  byte[] keyBytes = key.getEncoded();
  // FIX: assertEquals takes (expected, actual); the original passed the
  // actual first, both for the length and the per-byte comparison.
  assertEquals(KEY.length, keyBytes.length);
  for (int i = 0; i < KEY.length; i++) {
    assertEquals(KEY[i], keyBytes[i]);
  }
}

Class: org.apache.hadoop.hbase.io.crypto.aes.TestAES

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the AES cipher against the NIST SP 800-38A CTR-AES128 reference
 * vectors: encrypting the four reference plaintext blocks under the reference
 * key and counter must yield the reference ciphertext blocks.
 */
@Test
public void testAESAlgorithm() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  Cipher aes = Encryption.getCipher(conf, "AES");
  // FIX: assertEquals takes (expected, actual); the original had them swapped.
  assertEquals(AES.KEY_LENGTH, aes.getKeyLength());
  assertEquals(AES.IV_LENGTH, aes.getIvLength());
  Encryptor e = aes.getEncryptor();
  e.setKey(new SecretKeySpec(Bytes.fromHex("2b7e151628aed2a6abf7158809cf4f3c"), "AES"));
  e.setIv(Bytes.fromHex("f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"));
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  OutputStream cout = e.createEncryptionStream(out);
  cout.write(Bytes.fromHex("6bc1bee22e409f96e93d7e117393172a"));
  cout.write(Bytes.fromHex("ae2d8a571e03ac9c9eb76fac45af8e51"));
  cout.write(Bytes.fromHex("30c81c46a35ce411e5fbc1191a0a52ef"));
  cout.write(Bytes.fromHex("f69f2445df4f9b17ad2b417be66c3710"));
  cout.close();
  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  byte[] b = new byte[16];
  IOUtils.readFully(in, b);
  assertTrue("Failed #1", Bytes.equals(b, Bytes.fromHex("874d6191b620e3261bef6864990db6ce")));
  IOUtils.readFully(in, b);
  assertTrue("Failed #2", Bytes.equals(b, Bytes.fromHex("9806f66b7970fdff8617187bb9fffdff")));
  IOUtils.readFully(in, b);
  assertTrue("Failed #3", Bytes.equals(b, Bytes.fromHex("5ae4df3edbd5d35e5b4f09020db03eab")));
  IOUtils.readFully(in, b);
  assertTrue("Failed #4", Bytes.equals(b, Bytes.fromHex("1e031dda2fbe03d1792170a0f3009cee")));
}

Class: org.apache.hadoop.hbase.io.encoding.TestBufferedDataBlockEncoder

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Grows SeekerState's key buffer through every length from 1 to 64K and
 * checks that bytes written at earlier lengths survive each
 * ensureSpaceForKey() call (i.e. resizes copy the old contents).
 */
@Test
public void testEnsureSpaceForKey() {
  BufferedDataBlockEncoder.SeekerState state =
      new BufferedDataBlockEncoder.SeekerState(new ObjectIntPair(), false);
  for (int len = 1; len <= 65536; ++len) {
    state.keyLength = len;
    state.ensureSpaceForKey();
    // Stamp the newly exposed last byte ...
    state.keyBuffer[state.keyLength - 1] = (byte) ((len - 1) % 0xff);
    // ... and verify every earlier byte kept its stamp through the resize.
    for (int pos = 0; pos < len - 1; ++pos) {
      assertEquals((byte) (pos % 0xff), state.keyBuffer[pos]);
    }
  }
}

Class: org.apache.hadoop.hbase.io.encoding.TestDataBlockEncoders

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier 
/**
 * For every DataBlockEncoding that has an encoder (PREFIX_TREE is skipped
 * when using off-heap data), encodes the sample KVs and walks the seeker with
 * next(), failing with a detailed common-prefix diff if any returned cell's
 * key differs from the corresponding sample KV.
 */
@Test public void testNextOnSample() throws IOException { List sampleKv=generator.generateTestKeyValues(NUMBER_OF_KV,includesTags); for ( DataBlockEncoding encoding : DataBlockEncoding.values()) { if (this.useOffheapData && encoding == DataBlockEncoding.PREFIX_TREE) continue; if (encoding.getEncoder() == null) { continue; } DataBlockEncoder encoder=encoding.getEncoder(); ByteBuffer encodedBuffer=encodeKeyValues(encoding,sampleKv,getEncodingContext(Compression.Algorithm.NONE,encoding),this.useOffheapData); HFileContext meta=new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build(); DataBlockEncoder.EncodedSeeker seeker=encoder.createSeeker(CellComparator.COMPARATOR,encoder.newDataBlockDecodingContext(meta)); seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer)); int i=0; do { KeyValue expectedKeyValue=sampleKv.get(i); Cell cell=seeker.getCell(); if (CellComparator.COMPARATOR.compareKeyIgnoresMvcc(expectedKeyValue,cell) != 0) { int commonPrefix=CellUtil.findCommonPrefixInFlatKey(expectedKeyValue,cell,false,true); fail(String.format("next() produces wrong results " + "encoder: %s i: %d commonPrefix: %d" + "\n expected %s\n actual %s",encoder.toString(),i,commonPrefix,Bytes.toStringBinary(expectedKeyValue.getBuffer(),expectedKeyValue.getKeyOffset(),expectedKeyValue.getKeyLength()),CellUtil.toString(cell,false))); } i++; } while (seeker.next()); } }

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier 
/**
 * Test whether the decompression of the first key is implemented correctly:
 * for every encoding with an encoder (PREFIX_TREE skipped for off-heap data),
 * getFirstKeyCellInBlock on the encoded buffer must equal the first sample
 * KeyValue; failure reports the common key prefix for debugging.
 * @throws IOException if encoding the sample KVs fails
 */
@Test public void testFirstKeyInBlockOnSample() throws IOException { List sampleKv=generator.generateTestKeyValues(NUMBER_OF_KV,includesTags); for ( DataBlockEncoding encoding : DataBlockEncoding.values()) { if (this.useOffheapData && encoding == DataBlockEncoding.PREFIX_TREE) continue; if (encoding.getEncoder() == null) { continue; } DataBlockEncoder encoder=encoding.getEncoder(); ByteBuffer encodedBuffer=encodeKeyValues(encoding,sampleKv,getEncodingContext(Compression.Algorithm.NONE,encoding),this.useOffheapData); Cell key=encoder.getFirstKeyCellInBlock(new SingleByteBuff(encodedBuffer)); KeyValue firstKv=sampleKv.get(0); if (0 != CellComparator.COMPARATOR.compareKeyIgnoresMvcc(key,firstKv)) { int commonPrefix=CellUtil.findCommonPrefixInFlatKey(key,firstKv,false,true); fail(String.format("Bug in '%s' commonPrefix %d",encoder.toString(),commonPrefix)); } } }

Class: org.apache.hadoop.hbase.io.encoding.TestEncodedSeekers

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises encoded seekers for the parameterized encoding: writes and reads
 * a test region, compacts, reads again, then verifies that exactly one block
 * encoding — the configured one — landed in the block cache.
 */
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : "
      + includeTags + ", compressTags : " + compressTags);
  if (includeTags) {
    // Tags require HFile format v3.
    testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
  }
  LruBlockCache cache =
      (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME))
      .setMaxVersions(MAX_VERSIONS)
      .setDataBlockEncoding(encoding)
      .setBlocksize(BLOCK_SIZE)
      .setBloomFilterType(BloomType.NONE)
      .setCompressTags(compressTags);
  Region region = testUtil.createTestRegion(TABLE_NAME, hcd);
  doPuts(region);
  doGets(region);
  region.compact(false);
  doGets(region); // reads must still work against the compacted store
  // FIX: restore the generic type stripped from the declaration — with a raw
  // Map the unboxed "> 0" comparison on the value does not compile.
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}

Class: org.apache.hadoop.hbase.io.encoding.TestPrefixTree

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * HBASE-11728: scans over a PREFIX_TREE-encoded store must honor start/stop
 * rows that share long common prefixes with stored rows. Flushes five rows,
 * then runs four scans (full-family, qualifier-restricted, and two with
 * progressively tighter start rows) asserting the returned row order, values,
 * and scanner-exhaustion flags.
 *
 * NOTE(review): relies on row1..row4 / row*_bytes / qual1 / qual2 fixtures
 * declared on the enclosing class — verify their values before changing the
 * expected rows/vals arrays here.
 */
@Test public void testHBASE11728() throws Exception { Put put=new Put(Bytes.toBytes("a-b-0-0")); put.addColumn(fam,qual1,Bytes.toBytes("c1-value")); region.put(put); put=new Put(row1_bytes); put.addColumn(fam,qual1,Bytes.toBytes("c1-value")); region.put(put); put=new Put(row2_bytes); put.addColumn(fam,qual2,Bytes.toBytes("c2-value")); region.put(put); put=new Put(row3_bytes); put.addColumn(fam,qual2,Bytes.toBytes("c2-value-2")); region.put(put); put=new Put(row4_bytes); put.addColumn(fam,qual2,Bytes.toBytes("c2-value-3")); region.put(put); region.flush(true); String[] rows=new String[3]; rows[0]=row1; rows[1]=row2; rows[2]=row3; byte[][] val=new byte[3][]; val[0]=Bytes.toBytes("c1-value"); val[1]=Bytes.toBytes("c2-value"); val[2]=Bytes.toBytes("c2-value-2"); Scan scan=new Scan(); scan.setStartRow(row1_bytes); scan.setStopRow(Bytes.toBytes("a-b-A-1:")); RegionScanner scanner=region.getScanner(scan); List cells=new ArrayList(); for (int i=0; i < 3; i++) { assertEquals(i < 2,scanner.next(cells)); CellScanner cellScanner=Result.create(cells).cellScanner(); while (cellScanner.advance()) { assertEquals(rows[i],Bytes.toString(cellScanner.current().getRowArray(),cellScanner.current().getRowOffset(),cellScanner.current().getRowLength())); assertEquals(Bytes.toString(val[i]),Bytes.toString(cellScanner.current().getValueArray(),cellScanner.current().getValueOffset(),cellScanner.current().getValueLength())); } cells.clear(); } scanner.close(); scan=new Scan(); scan.addColumn(fam,qual2); scan.setStartRow(row1_bytes); scan.setStopRow(Bytes.toBytes("a-b-A-1:")); scanner=region.getScanner(scan); for (int i=1; i < 3; i++) { assertEquals(i < 2,scanner.next(cells)); CellScanner cellScanner=Result.create(cells).cellScanner(); while (cellScanner.advance()) { assertEquals(rows[i],Bytes.toString(cellScanner.current().getRowArray(),cellScanner.current().getRowOffset(),cellScanner.current().getRowLength())); } cells.clear(); } scanner.close(); scan=new Scan(); scan.addColumn(fam,qual2); 
scan.setStartRow(Bytes.toBytes("a-b-A-1-")); scan.setStopRow(Bytes.toBytes("a-b-A-1:")); scanner=region.getScanner(scan); for (int i=1; i < 3; i++) { assertEquals(i < 2,scanner.next(cells)); CellScanner cellScanner=Result.create(cells).cellScanner(); while (cellScanner.advance()) { assertEquals(rows[i],Bytes.toString(cellScanner.current().getRowArray(),cellScanner.current().getRowOffset(),cellScanner.current().getRowLength())); } cells.clear(); } scanner.close(); scan=new Scan(); scan.addColumn(fam,qual2); scan.setStartRow(Bytes.toBytes("a-b-A-1-140239")); scan.setStopRow(Bytes.toBytes("a-b-A-1:")); scanner=region.getScanner(scan); assertFalse(scanner.next(cells)); assertFalse(cells.isEmpty()); scanner.close(); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * HBASE-12817: a prefix-tree seek starting past "obj29995" must land on the
 * row "obj3" and exhaust the scanner in one call.
 */
@Test
public void testHBASE12817() throws IOException {
  // Seed rows obj2900..obj2999 plus shorter-prefix siblings of varying length.
  for (int i = 0; i < 100; i++) {
    region.put(new Put(Bytes.toBytes("obj" + (2900 + i))).addColumn(fam, qual1, Bytes.toBytes(i)));
  }
  region.put(new Put(Bytes.toBytes("obj299")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj29")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj2")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj3")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.flush(true);
  Scan scan = new Scan(Bytes.toBytes("obj29995"));
  RegionScanner scanner = region.getScanner(scan);
  List cells = new ArrayList();
  assertFalse(scanner.next(cells));
  assertArrayEquals(Bytes.toBytes("obj3"), Result.create(cells).getRow());
}

Class: org.apache.hadoop.hbase.io.encoding.TestPrefixTreeEncoding

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Feeds one batch of random KVs through the PrefixTreeCodec, then walks the
 * encoded block with its seeker asserting ascending cell order and that tag
 * presence matches the includesTag flag; dumps the input KV set on an
 * ordering failure to aid debugging.
 */
@Test public void testScanWithRandomData() throws Exception { PrefixTreeCodec encoder=new PrefixTreeCodec(); ByteArrayOutputStream baosInMemory=new ByteArrayOutputStream(); DataOutputStream userDataStream=new DataOutputStream(baosInMemory); HFileContext meta=new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(false).withIncludesTags(includesTag).withCompression(Algorithm.NONE).build(); HFileBlockEncodingContext blkEncodingCtx=new HFileBlockDefaultEncodingContext(DataBlockEncoding.PREFIX_TREE,new byte[0],meta); generateRandomTestData(kvset,numBatchesWritten++,includesTag,encoder,blkEncodingCtx,userDataStream); EncodedSeeker seeker=encoder.createSeeker(CellComparator.COMPARATOR,encoder.newDataBlockDecodingContext(meta)); byte[] onDiskBytes=baosInMemory.toByteArray(); ByteBuffer readBuffer=ByteBuffer.wrap(onDiskBytes,DataBlockEncoding.ID_SIZE,onDiskBytes.length - DataBlockEncoding.ID_SIZE); seeker.setCurrentBuffer(new SingleByteBuff(readBuffer)); Cell previousKV=null; do { Cell currentKV=seeker.getCell(); System.out.println(currentKV); if (previousKV != null && CellComparator.COMPARATOR.compare(currentKV,previousKV) < 0) { dumpInputKVSet(); fail("Current kv " + currentKV + " is smaller than previous keyvalue "+ previousKV); } if (!includesTag) { assertFalse(currentKV.getTagsLength() > 0); } else { Assert.assertTrue(currentKV.getTagsLength() > 0); } previousKV=currentKV; } while (seeker.next()); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * seekToKeyInBlock(..., seekBefore=true) on a PREFIX_TREE block with fixed
 * data: seeking before the first row yields no cell, seeking before a middle
 * row lands on the previous row, and seeking past the last row ("zzzz") lands
 * on the final row of the batch.
 */
@Test public void testSeekBeforeWithFixedData() throws Exception { formatRowNum=true; PrefixTreeCodec encoder=new PrefixTreeCodec(); int batchId=numBatchesWritten++; HFileContext meta=new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(false).withIncludesTags(includesTag).withCompression(Algorithm.NONE).build(); HFileBlockEncodingContext blkEncodingCtx=new HFileBlockDefaultEncodingContext(DataBlockEncoding.PREFIX_TREE,new byte[0],meta); ByteArrayOutputStream baosInMemory=new ByteArrayOutputStream(); DataOutputStream userDataStream=new DataOutputStream(baosInMemory); generateFixedTestData(kvset,batchId,false,includesTag,encoder,blkEncodingCtx,userDataStream); EncodedSeeker seeker=encoder.createSeeker(CellComparator.COMPARATOR,encoder.newDataBlockDecodingContext(meta)); byte[] onDiskBytes=baosInMemory.toByteArray(); ByteBuffer readBuffer=ByteBuffer.wrap(onDiskBytes,DataBlockEncoding.ID_SIZE,onDiskBytes.length - DataBlockEncoding.ID_SIZE); seeker.setCurrentBuffer(new SingleByteBuff(readBuffer)); Cell seekKey=CellUtil.createFirstDeleteFamilyCellOnRow(getRowKey(batchId,0),CF_BYTES); seeker.seekToKeyInBlock(seekKey,true); assertEquals(null,seeker.getCell()); seekKey=CellUtil.createFirstDeleteFamilyCellOnRow(getRowKey(batchId,NUM_ROWS_PER_BATCH / 3),CF_BYTES); seeker.seekToKeyInBlock(seekKey,true); assertNotNull(seeker.getCell()); assertArrayEquals(getRowKey(batchId,NUM_ROWS_PER_BATCH / 3 - 1),CellUtil.cloneRow(seeker.getCell())); seekKey=CellUtil.createFirstDeleteFamilyCellOnRow(Bytes.toBytes("zzzz"),CF_BYTES); seeker.seekToKeyInBlock(seekKey,true); assertNotNull(seeker.getCell()); assertArrayEquals(getRowKey(batchId,NUM_ROWS_PER_BATCH - 1),CellUtil.cloneRow(seeker.getCell())); }

Class: org.apache.hadoop.hbase.io.hfile.TestBlockCacheReporting

InternalCallVerifier BooleanVerifier 
/**
 * Sanity-checks the default LRU block cache through CacheConfig, seeds it
 * with data and hits, and exercises the per-block / per-file / bucket-cache
 * reporting paths including JSON serialization.
 */
@Test
public void testLruBlockCache() throws JsonGenerationException, JsonMappingException, IOException {
  CacheConfig cacheConfig = new CacheConfig(this.conf);
  assertTrue(cacheConfig.isBlockCacheEnabled());
  assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cacheConfig.isInMemory());
  assertTrue(cacheConfig.getBlockCache() instanceof LruBlockCache);
  logPerBlock(cacheConfig.getBlockCache());
  addDataAndHits(cacheConfig.getBlockCache(), 3);
  BlockCache blockCache = cacheConfig.getBlockCache();
  LOG.info("count=" + blockCache.getBlockCount() + ", currentSize=" + blockCache.getCurrentSize()
      + ", freeSize=" + blockCache.getFreeSize());
  LOG.info(cacheConfig.getBlockCache().getStats());
  BlockCacheUtil.CachedBlocksByFile cbsbf = logPerBlock(cacheConfig.getBlockCache());
  LOG.info(cbsbf);
  logPerFile(cbsbf);
  bucketCacheReport(cacheConfig.getBlockCache());
  LOG.info(BlockCacheUtil.toJSON(cbsbf));
}

Class: org.apache.hadoop.hbase.io.hfile.TestCacheConfig

InternalCallVerifier BooleanVerifier 
/**
 * With no bucket cache configured, CacheConfig must hand out an on-heap
 * LruBlockCache with the default in-memory setting and pass basic block ops.
 */
@Test
public void testCacheConfigDefaultLRUBlockCache() {
  CacheConfig cacheConfig = new CacheConfig(this.conf);
  assertTrue(cacheConfig.isBlockCacheEnabled());
  assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cacheConfig.isInMemory());
  basicBlockCacheOps(cacheConfig, false, true);
  assertTrue(cacheConfig.getBlockCache() instanceof LruBlockCache);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Assert that when BUCKET_CACHE_COMBINED_KEY is false (the non-default), we
 * deploy LruBlockCache as L1 with a BucketCache as its L2 victim handler:
 * each cache gets the size its config dictates, a normal block lands only in
 * L1, and a block larger than L1's acceptable size is evicted into L2 (the
 * spin-wait below polls until the async eviction completes).
 */
@Test(timeout=10000) public void testBucketCacheConfigL1L2Setup(){ this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY,"offheap"); this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,0.001f); MemoryUsage mu=ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); long lruExpectedSize=CacheConfig.getLruCacheSize(this.conf,mu); final int bcSize=100; long bcExpectedSize=100 * 1024 * 1024; assertTrue(lruExpectedSize < bcExpectedSize); this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY,bcSize); this.conf.setBoolean(CacheConfig.BUCKET_CACHE_COMBINED_KEY,false); CacheConfig cc=new CacheConfig(this.conf); basicBlockCacheOps(cc,false,false); assertTrue(cc.getBlockCache() instanceof LruBlockCache); LruBlockCache lbc=(LruBlockCache)cc.getBlockCache(); assertEquals(lruExpectedSize,lbc.getMaxSize()); BlockCache bc=lbc.getVictimHandler(); assertEquals(bcExpectedSize,((BucketCache)bc).getMaxSize()); long initialL1BlockCount=lbc.getBlockCount(); long initialL2BlockCount=bc.getBlockCount(); Cacheable c=new DataCacheEntry(); BlockCacheKey bck=new BlockCacheKey("bck",0); lbc.cacheBlock(bck,c,false,false); assertEquals(initialL1BlockCount + 1,lbc.getBlockCount()); assertEquals(initialL2BlockCount,bc.getBlockCount()); final long justTooBigSize=lbc.acceptableSize() + 1; lbc.cacheBlock(new BlockCacheKey("bck2",0),new DataCacheEntry(){ @Override public long heapSize(){ return justTooBigSize; } @Override public int getSerializedLength(){ return (int)heapSize(); } } ); while (initialL1BlockCount != lbc.getBlockCount()) Threads.sleep(10); assertEquals(initialL1BlockCount,lbc.getBlockCount()); long count=bc.getBlockCount(); assertTrue(initialL2BlockCount + 1 <= count); }

Class: org.apache.hadoop.hbase.io.hfile.TestChecksum

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test all checksum types by writing a data block with each one and reading
 * it back: the 1000 ints must round-trip, the buffer must be exactly
 * consumed, and no checksum failures may be recorded.
 */
@Test
public void testAllChecksumTypes() throws IOException {
  // FIX: iterate the enum values directly instead of copying them into an
  // ArrayList and walking an explicit Iterator for no benefit.
  for (ChecksumType cktype : ChecksumType.values()) {
    Path path = new Path(TEST_UTIL.getDataTestDir(), "checksum" + cktype.getName());
    FSDataOutputStream os = fs.create(path);
    HFileContext meta = new HFileContextBuilder().withChecksumType(cktype).build();
    HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
    DataOutputStream dos = hbw.startWriting(BlockType.DATA);
    for (int i = 0; i < 1000; ++i) {
      dos.writeInt(i);
    }
    hbw.writeHeaderAndData(os);
    int totalSize = hbw.getOnDiskSizeWithHeader();
    os.close();
    // FIX: assertTrue reads better than assertEquals(true, ...).
    assertTrue(hfs.useHBaseChecksum());
    FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
    meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, totalSize, (HFileSystem) fs, path, meta);
    HFileBlock b = hbr.readBlockData(0, -1, -1, false);
    ByteBuff data = b.getBufferWithoutHeader();
    for (int i = 0; i < 1000; i++) {
      assertEquals(i, data.getInt());
    }
    // The buffer must be fully consumed: one more read has to underflow.
    boolean exception_thrown = false;
    try {
      data.getInt();
    } catch (BufferUnderflowException e) {
      exception_thrown = true;
    }
    assertTrue(exception_thrown);
    assertEquals(0, HFile.getChecksumFailuresCount());
  }
}

Class: org.apache.hadoop.hbase.io.hfile.TestCombinedBlockCache

InternalCallVerifier EqualityVerifier 
/**
 * Exercises CombinedCacheStats aggregation over an L1 (lru) and L2 (bucket)
 * CacheStats pair: request/hit/miss counts and ratios, eviction accounting,
 * failed inserts, and the rolling-period sums/ratios across two
 * rollMetricsPeriod() calls. All double comparisons use a 0.01 delta.
 */
@Test public void testCombinedCacheStats(){ CacheStats lruCacheStats=new CacheStats("lruCacheStats",2); CacheStats bucketCacheStats=new CacheStats("bucketCacheStats",2); CombinedCacheStats stats=new CombinedCacheStats(lruCacheStats,bucketCacheStats); double delta=0.01; lruCacheStats.hit(true); lruCacheStats.miss(true,false); bucketCacheStats.hit(false); bucketCacheStats.hit(false); bucketCacheStats.miss(false,true); assertEquals(5,stats.getRequestCount()); assertEquals(2,stats.getRequestCachingCount()); assertEquals(2,stats.getMissCount()); assertEquals(1,stats.getPrimaryMissCount()); assertEquals(1,stats.getMissCachingCount()); assertEquals(3,stats.getHitCount()); assertEquals(3,stats.getPrimaryHitCount()); assertEquals(1,stats.getHitCachingCount()); assertEquals(0.6,stats.getHitRatio(),delta); assertEquals(0.5,stats.getHitCachingRatio(),delta); assertEquals(0.4,stats.getMissRatio(),delta); assertEquals(0.5,stats.getMissCachingRatio(),delta); lruCacheStats.evicted(1000,true); lruCacheStats.evicted(1000,false); lruCacheStats.evict(); bucketCacheStats.evict(); assertEquals(2,stats.getEvictionCount()); assertEquals(2,stats.getEvictedCount()); assertEquals(1,stats.getPrimaryEvictedCount()); assertEquals(1.0,stats.evictedPerEviction(),delta); lruCacheStats.failInsert(); assertEquals(1,stats.getFailedInserts()); stats.rollMetricsPeriod(); assertEquals(3,stats.getSumHitCountsPastNPeriods()); assertEquals(5,stats.getSumRequestCountsPastNPeriods()); assertEquals(1,stats.getSumHitCachingCountsPastNPeriods()); assertEquals(2,stats.getSumRequestCachingCountsPastNPeriods()); assertEquals(0.6,stats.getHitRatioPastNPeriods(),delta); assertEquals(0.5,stats.getHitCachingRatioPastNPeriods(),delta); lruCacheStats.hit(true); lruCacheStats.hit(true); lruCacheStats.hit(true); stats.rollMetricsPeriod(); assertEquals(6,stats.getSumHitCountsPastNPeriods()); assertEquals(8,stats.getSumRequestCountsPastNPeriods()); assertEquals(4,stats.getSumHitCachingCountsPastNPeriods()); 
assertEquals(5,stats.getSumRequestCachingCountsPastNPeriods()); assertEquals(0.75,stats.getHitRatioPastNPeriods(),delta); assertEquals(0.8,stats.getHitCachingRatioPastNPeriods(),delta); }

Class: org.apache.hadoop.hbase.io.hfile.TestFixedFileTrailer

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Serializes a fully populated FixedFileTrailer, deserializes it back and
 * checks field-level equality; then corrupts the version byte (one below min
 * and one above max) to assert the reader rejects invalid HFile versions with
 * a clean "Invalid HFile version" message; finally round-trips through an
 * actual file and sanity-checks the toString() field count.
 *
 * NOTE(review): several assertEquals calls here pass (actual, expected) —
 * e.g. dos.size() vs getTrailerSize() — so failure messages read inverted;
 * worth normalizing to (expected, actual).
 */
@Test public void testTrailer() throws IOException { FixedFileTrailer t=new FixedFileTrailer(version,HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); t.setDataIndexCount(3); t.setEntryCount(((long)Integer.MAX_VALUE) + 1); t.setLastDataBlockOffset(291); t.setNumDataIndexLevels(3); t.setComparatorClass(CellComparator.COMPARATOR.getClass()); t.setFirstDataBlockOffset(9081723123L); t.setUncompressedDataIndexSize(827398717L); t.setLoadOnOpenOffset(128); t.setMetaIndexCount(7); t.setTotalUncompressedBytes(129731987); { DataOutputStream dos=new DataOutputStream(baos); t.serialize(dos); dos.flush(); assertEquals(dos.size(),FixedFileTrailer.getTrailerSize(version)); } byte[] bytes=baos.toByteArray(); baos.reset(); assertEquals(bytes.length,FixedFileTrailer.getTrailerSize(version)); ByteArrayInputStream bais=new ByteArrayInputStream(bytes); { DataInputStream dis=new DataInputStream(bais); FixedFileTrailer t2=new FixedFileTrailer(version,HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); t2.deserialize(dis); assertEquals(-1,bais.read()); checkLoadedTrailer(version,t,t2); } Path trailerPath=new Path(util.getDataTestDir(),"trailer_" + version); { for ( byte invalidVersion : new byte[]{HFile.MIN_FORMAT_VERSION - 1,HFile.MAX_FORMAT_VERSION + 1}) { bytes[bytes.length - 1]=invalidVersion; writeTrailer(trailerPath,null,bytes); try { readTrailer(trailerPath); fail("Exception expected"); } catch ( IllegalArgumentException ex) { String msg=ex.getMessage(); String cleanMsg=msg.replaceAll("^(java(\\.[a-zA-Z]+)+:\\s+)?|\\s+\\(.*\\)\\s*$",""); assertEquals("Actual exception message is \"" + msg + "\".\n"+ "Cleaned-up message","Invalid HFile version: " + invalidVersion,cleanMsg); LOG.info("Got an expected exception: " + msg); } } } writeTrailer(trailerPath,t,null); FixedFileTrailer t4=readTrailer(trailerPath); checkLoadedTrailer(version,t,t4); String trailerStr=t.toString(); assertEquals("Invalid number of fields in the string representation " + "of the trailer: " + 
trailerStr,NUM_FIELDS_BY_VERSION[version - 2],trailerStr.split(", ").length); assertEquals(trailerStr,t4.toString()); }

Class: org.apache.hadoop.hbase.io.hfile.TestForceCacheImportantBlocks

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes test data into a region and verifies caching of important blocks:
 * the first get warms the cache, and a repeated get must be a pure cache hit
 * when per-CF block caching is enabled, or keep missing when it is disabled.
 */
@Test
public void testCacheBlocks() throws IOException {
  TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, BLOCK_SIZE);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(CF))
      .setMaxVersions(MAX_VERSIONS)
      .setCompressionType(COMPRESSION_ALGORITHM)
      .setBloomFilterType(BLOOM_TYPE);
  hcd.setBlocksize(BLOCK_SIZE);
  hcd.setBlockCacheEnabled(cfCacheEnabled);
  Region region = TEST_UTIL.createTestRegion(TABLE, hcd);
  BlockCache cache = region.getStore(hcd.getName()).getCacheConfig().getBlockCache();
  CacheStats stats = cache.getStats();
  writeTestData(region);
  assertEquals(0, stats.getHitCount());
  assertEquals(0, HFile.dataBlockReadCnt.get());
  // First read populates the cache and must touch data blocks on disk.
  region.get(new Get(Bytes.toBytes("row" + 0)));
  assertTrue(stats.getHitCount() > 0);
  assertTrue(HFile.dataBlockReadCnt.get() > 0);
  long missCount = stats.getMissCount();
  // Second read of the same row: all-hit iff caching is enabled for the CF.
  region.get(new Get(Bytes.toBytes("row" + 0)));
  if (this.cfCacheEnabled) {
    assertEquals(missCount, stats.getMissCount());
  } else {
    assertTrue(stats.getMissCount() > missCount);
  }
}

Class: org.apache.hadoop.hbase.io.hfile.TestHFile

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * Test empty HFile: a file written with zero entries must still open cleanly —
 * loadFileInfo() succeeds and both first and last keys are null.
 * @throws IOException on write/read failure
 */
@Test
public void testEmptyHFile() throws IOException {
  if (cacheConf == null) {
    cacheConf = new CacheConfig(conf);
  }
  Path path = new Path(ROOT_DIR, testName.getMethodName());
  HFileContext context = new HFileContextBuilder().withIncludesTags(false).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf).withPath(fs, path)
      .withFileContext(context).create();
  writer.close();
  Reader reader = HFile.createReader(fs, path, cacheConf, conf);
  reader.loadFileInfo();
  assertNull(reader.getFirstKey());
  assertNull(reader.getLastKey());
}

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * For every supported compression algorithm, an HFile written without meta
 * blocks must return null when an absent meta block is requested.
 */
@Test
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) {
    cacheConf = new CacheConfig(conf);
  }
  for (Compression.Algorithm algo : HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path path = new Path(ROOT_DIR, "nometa_" + algo + ".hfile");
    FSDataOutputStream fout = createFSOutput(path);
    HFileContext meta = new HFileContextBuilder().withCompression(algo)
        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf).withOutputStream(fout)
        .withFileContext(meta).create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, path, cacheConf, conf);
    reader.loadFileInfo();
    // Lookup key kept byte-for-byte (original spelling preserved).
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies {@code HFileWriterImpl.getMidpoint}: the fabricated "fake" key must sort
 * strictly after the left key and no later than the right key, and must be as short as
 * possible (shortened row/family/qualifier when a separator byte can be synthesized).
 *
 * Bug fixed: three {@code Bytes.equals(...)} calls checking the expected shortened row
 * had their boolean results silently discarded; they are now wrapped in
 * {@code assertTrue} so the shortened-row content is actually verified. Unused locals
 * {@code rowA}/{@code rowB} were removed.
 */
@Test
public void testGetShortMidpoint() {
  // Identical keys: midpoint may compare equal to either side.
  Cell left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  Cell right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  Cell mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) <= 0);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) <= 0);

  // Adjacent rows: midpoint must be strictly greater than the left key.
  left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  right = CellUtil.createCell(Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) <= 0);

  left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) <= 0);

  // A long right row can be shortened to a single separator byte.
  left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  right = CellUtil.createCell(Bytes.toBytes("bbbbbbb"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) < 0);
  assertEquals(1, (int) mid.getRowLength());

  // Same row, differing family.
  left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("a"));
  mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) <= 0);

  left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaa"), Bytes.toBytes("b"));
  mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) < 0);
  assertEquals(2, (int) mid.getFamilyLength());

  // Same row and family, differing qualifier.
  left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaaa"));
  mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) < 0);
  assertEquals(2, (int) mid.getQualifierLength());

  left = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  right = CellUtil.createCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("b"));
  mid = HFileWriterImpl.getMidpoint(CellComparator.COMPARATOR, left, right);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
  assertTrue(CellComparator.COMPARATOR.compareKeyIgnoresMvcc(mid, right) <= 0);
  assertEquals(1, (int) mid.getQualifierLength());

  // The meta comparator never fabricates a shorter key: midpoint equals the right key.
  left = CellUtil.createCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  right = CellUtil.createCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a"));
  mid = HFileWriterImpl.getMidpoint(CellComparator.META_COMPARATOR, left, right);
  assertTrue(CellComparator.META_COMPARATOR.compareKeyIgnoresMvcc(left, mid) < 0);
  assertTrue(CellComparator.META_COMPARATOR.compareKeyIgnoresMvcc(mid, right) == 0);

  // KeyValue-based cases.
  byte[] family = Bytes.toBytes("family");
  byte[] qualA = Bytes.toBytes("qfA");
  byte[] qualB = Bytes.toBytes("qfB");
  final CellComparator keyComparator = CellComparator.COMPARATOR;
  long ts = 5;

  // Rows diverge after a common prefix: fake row is prefix + (left diff byte + 1).
  KeyValue kv1 = new KeyValue(Bytes.toBytes("the quick brown fox"), family, qualA, ts, Type.Put);
  KeyValue kv2 = new KeyValue(Bytes.toBytes("the who test text"), family, qualA, ts, Type.Put);
  Cell newKey = HFileWriterImpl.getMidpoint(keyComparator, kv1, kv2);
  assertTrue(keyComparator.compare(kv1, newKey) < 0);
  assertTrue((keyComparator.compare(kv2, newKey)) > 0);
  byte[] expectedArray = Bytes.toBytes("the r");
  assertTrue(Bytes.equals(newKey.getRowArray(), newKey.getRowOffset(), newKey.getRowLength(),
      expectedArray, 0, expectedArray.length));

  // Identical keys except timestamp: midpoint collapses onto the right key.
  kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, 5, Type.Put);
  kv2 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, 0, Type.Put);
  assertTrue(keyComparator.compare(kv1, kv2) < 0);
  newKey = HFileWriterImpl.getMidpoint(keyComparator, kv1, kv2);
  assertTrue(keyComparator.compare(kv1, newKey) < 0);
  assertTrue((keyComparator.compare(kv2, newKey)) == 0);

  kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, -5, Type.Put);
  kv2 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, -10, Type.Put);
  assertTrue(keyComparator.compare(kv1, kv2) < 0);
  newKey = HFileWriterImpl.getMidpoint(keyComparator, kv1, kv2);
  assertTrue(keyComparator.compare(kv1, newKey) < 0);
  assertTrue((keyComparator.compare(kv2, newKey)) == 0);

  // Same row/family, differing qualifier: fake key keeps qualB with maximal ts/type.
  kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, 5, Type.Put);
  kv2 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualB, 5, Type.Put);
  assertTrue(keyComparator.compare(kv1, kv2) < 0);
  newKey = HFileWriterImpl.getMidpoint(keyComparator, kv1, kv2);
  assertTrue(keyComparator.compare(kv1, newKey) < 0);
  assertTrue((keyComparator.compare(kv2, newKey)) > 0);
  assertTrue(Arrays.equals(CellUtil.cloneFamily(newKey), family));
  assertTrue(Arrays.equals(CellUtil.cloneQualifier(newKey), qualB));
  assertTrue(newKey.getTimestamp() == HConstants.LATEST_TIMESTAMP);
  assertTrue(newKey.getTypeByte() == Type.Maximum.getCode());

  // Meta comparator again: midpoint is simply the right key.
  final CellComparator metaKeyComparator = CellComparator.META_COMPARATOR;
  kv1 = new KeyValue(Bytes.toBytes("ilovehbase123"), family, qualA, 5, Type.Put);
  kv2 = new KeyValue(Bytes.toBytes("ilovehbase234"), family, qualA, 0, Type.Put);
  newKey = HFileWriterImpl.getMidpoint(metaKeyComparator, kv1, kv2);
  assertTrue(metaKeyComparator.compare(kv1, newKey) < 0);
  assertTrue((metaKeyComparator.compare(kv2, newKey) == 0));

  // Left row is a prefix of the right row: fake row is left row plus one byte of right.
  kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, ts, Type.Put);
  kv2 = new KeyValue(Bytes.toBytes("ilovehbaseandhdfs"), family, qualA, ts, Type.Put);
  assertTrue(keyComparator.compare(kv1, kv2) < 0);
  newKey = HFileWriterImpl.getMidpoint(keyComparator, kv1, kv2);
  assertTrue(keyComparator.compare(kv1, newKey) < 0);
  assertTrue((keyComparator.compare(kv2, newKey)) > 0);
  expectedArray = Bytes.toBytes("ilovehbasea");
  assertTrue(Bytes.equals(newKey.getRowArray(), newKey.getRowOffset(), newKey.getRowLength(),
      expectedArray, 0, expectedArray.length));

  // Diff byte cannot be incremented past the right's: fake row is the right row's prefix.
  kv1 = new KeyValue(Bytes.toBytes("100abcdefg"), family, qualA, ts, Type.Put);
  kv2 = new KeyValue(Bytes.toBytes("101abcdefg"), family, qualA, ts, Type.Put);
  assertTrue(keyComparator.compare(kv1, kv2) < 0);
  newKey = HFileWriterImpl.getMidpoint(keyComparator, kv1, kv2);
  assertTrue(keyComparator.compare(kv1, newKey) < 0);
  assertTrue((keyComparator.compare(kv2, newKey)) > 0);
  expectedArray = Bytes.toBytes("101");
  assertTrue(Bytes.equals(newKey.getRowArray(), newKey.getRowOffset(), newKey.getRowLength(),
      expectedArray, 0, expectedArray.length));
}

Class: org.apache.hadoop.hbase.io.hfile.TestHFileBlock

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A block created with the NONE algorithm and cached via getBlockForCaching must already
 * be unpacked and carry matching on-disk/uncompressed sizes (plus the 4-byte checksum
 * delta in the on-disk size).
 */
@Test
public void testNoCompression() throws IOException {
  CacheConfig mockedCacheConf = Mockito.mock(CacheConfig.class);
  Mockito.when(mockedCacheConf.isBlockCacheEnabled()).thenReturn(false);
  HFileBlock cachedBlock =
      createTestV2Block(NONE, includesMemstoreTS, false).getBlockForCaching(mockedCacheConf);
  assertEquals(4000, cachedBlock.getUncompressedSizeWithoutHeader());
  assertEquals(4004, cachedBlock.getOnDiskSizeWithoutHeader());
  assertTrue(cachedBlock.isUnpacked());
}

Class: org.apache.hadoop.hbase.io.hfile.TestHFileBlockCompatibility

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes two v2 data blocks with every compression algorithm and both pread modes, then
 * reads them back through {@code HFileBlock.FSReaderImpl} and checks block sizes. For GZ
 * it additionally re-reads with an explicitly supplied on-disk size and verifies that a
 * deliberately wrong on-disk size is rejected with a descriptive IOException.
 */
@Test public void testReaderV2() throws IOException {
  if (includesTag) {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version",3);
  }
  for ( Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
    for ( boolean pread : new boolean[]{false,true}) {
      LOG.info("testReaderV2: Compression algorithm: " + algo + ", pread="+ pread);
      Path path=new Path(TEST_UTIL.getDataTestDir(),"blocks_v2_" + algo);
      FSDataOutputStream os=fs.create(path);
      // Legacy writer from this compatibility test class (pre-checksum block format).
      Writer hbw=new Writer(algo,null,includesMemstoreTS,includesTag);
      long totalSize=0;
      for (int blockId=0; blockId < 2; ++blockId) {
        DataOutputStream dos=hbw.startWriting(BlockType.DATA);
        // 1234 ints == 4936 uncompressed payload bytes, matched by the asserts below.
        for (int i=0; i < 1234; ++i) dos.writeInt(i);
        hbw.writeHeaderAndData(os);
        totalSize+=hbw.getOnDiskSizeWithHeader();
      }
      os.close();
      FSDataInputStream is=fs.open(path);
      HFileContext meta=new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag).withCompression(algo).build();
      HFileBlock.FSReader hbr=new HFileBlock.FSReaderImpl(new FSDataInputStreamWrapper(is),totalSize,fs,path,meta);
      // -1: on-disk size unknown to the caller, so the reader takes it from the header.
      HFileBlock b=hbr.readBlockData(0,-1,-1,pread);
      is.close();
      b.sanityCheck();
      assertEquals(4936,b.getUncompressedSizeWithoutHeader());
      // GZ has a deterministic compressed size for this payload; other algos store it raw.
      assertEquals(algo == GZ ? 2173 : 4936,b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
      HFileBlock expected=b;
      if (algo == GZ) {
        is=fs.open(path);
        hbr=new HFileBlock.FSReaderImpl(new FSDataInputStreamWrapper(is),totalSize,fs,path,meta);
        // Re-read the first block passing the exact on-disk size; must equal the first read.
        b=hbr.readBlockData(0,2173 + HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM + b.totalChecksumBytes(),-1,pread);
        assertEquals(expected,b);
        int wrongCompressedSize=2172;
        try {
          // A caller-supplied size inconsistent with the block header must be rejected.
          b=hbr.readBlockData(0,wrongCompressedSize + HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM,-1,pread);
          fail("Exception expected");
        } catch ( IOException ex) {
          String expectedPrefix="On-disk size without header provided is " + wrongCompressedSize + ", but block header contains "+ b.getOnDiskSizeWithoutHeader()+ ".";
          assertTrue("Invalid exception message: '" + ex.getMessage() + "'.\nMessage is expected to start with: '"+ expectedPrefix+ "'",ex.getMessage().startsWith(expectedPrefix));
        }
        is.close();
      }
    }
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test encoding/decoding data blocks.
 * Writes blocks through the legacy writer for every (compression, pread, encoding)
 * combination, capturing each block's encoded bytes at write time, then reads the blocks
 * back, unpacks them if needed, and compares the decoded payload with the captured bytes.
 * @throws IOException a bug or a problem with temporary files.
 */
@Test public void testDataBlockEncoding() throws IOException {
  if (includesTag) {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version",3);
  }
  final int numBlocks=5;
  for ( Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
    for ( boolean pread : new boolean[]{false,true}) {
      for ( DataBlockEncoding encoding : DataBlockEncoding.values()) {
        LOG.info("testDataBlockEncoding algo " + algo + " pread = "+ pread+ " encoding "+ encoding);
        Path path=new Path(TEST_UTIL.getDataTestDir(),"blocks_v2_" + algo + "_"+ encoding.toString());
        FSDataOutputStream os=fs.create(path);
        HFileDataBlockEncoder dataBlockEncoder=(encoding != DataBlockEncoding.NONE) ? new HFileDataBlockEncoderImpl(encoding) : NoOpDataBlockEncoder.INSTANCE;
        TestHFileBlockCompatibility.Writer hbw=new TestHFileBlockCompatibility.Writer(algo,dataBlockEncoder,includesMemstoreTS,includesTag);
        long totalSize=0;
        final List encodedSizes=new ArrayList();
        final List encodedBlocks=new ArrayList();
        // Write numBlocks blocks, recording each block's encoded size and data section.
        for (int blockId=0; blockId < numBlocks; ++blockId) {
          hbw.startWriting(BlockType.DATA);
          TestHFileBlock.writeTestKeyValues(hbw,blockId,pread,includesTag);
          hbw.writeHeaderAndData(os);
          int headerLen=HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
          byte[] encodedResultWithHeader=hbw.getUncompressedDataWithHeader();
          final int encodedSize=encodedResultWithHeader.length - headerLen;
          if (encoding != DataBlockEncoding.NONE) {
            // The encoding id sits right after the header; skip it in the data section.
            headerLen+=DataBlockEncoding.ID_SIZE;
          }
          byte[] encodedDataSection=new byte[encodedResultWithHeader.length - headerLen];
          System.arraycopy(encodedResultWithHeader,headerLen,encodedDataSection,0,encodedDataSection.length);
          final ByteBuffer encodedBuf=ByteBuffer.wrap(encodedDataSection);
          encodedSizes.add(encodedSize);
          encodedBlocks.add(encodedBuf);
          totalSize+=hbw.getOnDiskSizeWithHeader();
        }
        os.close();
        // Read the blocks back and compare against the captured encodings.
        FSDataInputStream is=fs.open(path);
        HFileContext meta=new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag).withCompression(algo).build();
        HFileBlock.FSReaderImpl hbr=new HFileBlock.FSReaderImpl(new FSDataInputStreamWrapper(is),totalSize,fs,path,meta);
        hbr.setDataBlockEncoder(dataBlockEncoder);
        hbr.setIncludesMemstoreTS(includesMemstoreTS);
        HFileBlock b;
        int pos=0;
        for (int blockId=0; blockId < numBlocks; ++blockId) {
          b=hbr.readBlockData(pos,-1,-1,pread);
          b.sanityCheck();
          if (meta.isCompressedOrEncrypted()) {
            // Blocks come back packed when compression/encryption is on; unpack first.
            assertFalse(b.isUnpacked());
            b=b.unpack(meta,hbr);
          }
          pos+=b.getOnDiskSizeWithHeader();
          assertEquals((int)encodedSizes.get(blockId),b.getUncompressedSizeWithoutHeader());
          ByteBuff actualBuffer=b.getBufferWithoutHeader();
          if (encoding != DataBlockEncoding.NONE) {
            // Check and then skip the two-byte encoding id before comparing payloads.
            assertEquals(0,actualBuffer.get(0));
            assertEquals(encoding.getId(),actualBuffer.get(1));
            actualBuffer.position(2);
            actualBuffer=actualBuffer.slice();
          }
          ByteBuffer expectedBuffer=encodedBlocks.get(blockId);
          expectedBuffer.rewind();
          TestHFileBlock.assertBuffersEqual(new SingleByteBuff(expectedBuffer),actualBuffer,algo,encoding,pread);
        }
        is.close();
      }
    }
  }
}

Class: org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Testing block index through the HFile writer/reader APIs. Allows to test setting index
 * block size through configuration, intermediate-level index blocks, and caching index
 * blocks on write: writes NUM_KV random ordered keys per configured chunk size, checks
 * trailer metadata, seeks in both directions, validates every leaf-index key, the midkey
 * and the uncompressed index size.
 *
 * Bug fixed: the leaf-index scan used {@code return} when it encountered a
 * non-LEAF_INDEX block, which silently ended the whole test at the first data block,
 * skipping the midkey/index-size assertions and all remaining chunk-size iterations; it
 * now uses {@code continue}. Raw {@code Set}/{@code List} were parameterized.
 * @throws IOException
 */
@Test
public void testHFileWriterAndReader() throws IOException {
  Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "hfile_for_block_index");
  CacheConfig cacheConf = new CacheConfig(conf);
  BlockCache blockCache = cacheConf.getBlockCache();
  for (int testI = 0; testI < INDEX_CHUNK_SIZES.length; ++testI) {
    int indexBlockSize = INDEX_CHUNK_SIZES[testI];
    int expectedNumLevels = EXPECTED_NUM_LEVELS[testI];
    LOG.info("Index block size: " + indexBlockSize + ", compression: " + compr);
    // Evict blocks left over from the previous chunk-size iteration.
    blockCache.evictBlocksByHfileName(hfilePath.getName());
    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, indexBlockSize);
    Set<String> keyStrSet = new HashSet<String>();
    byte[][] keys = new byte[NUM_KV][];
    byte[][] values = new byte[NUM_KV][];
    // Write NUM_KV random ordered keys.
    {
      HFileContext meta =
          new HFileContextBuilder().withBlockSize(SMALL_BLOCK_SIZE).withCompression(compr).build();
      HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
          .withPath(fs, hfilePath).withFileContext(meta).create();
      Random rand = new Random(19231737); // fixed seed for reproducibility
      byte[] family = Bytes.toBytes("f");
      byte[] qualifier = Bytes.toBytes("q");
      for (int i = 0; i < NUM_KV; ++i) {
        byte[] row = RandomKeyValueUtil.randomOrderedKey(rand, i);
        KeyValue kv = new KeyValue(row, family, qualifier,
            EnvironmentEdgeManager.currentTime(), RandomKeyValueUtil.randomValue(rand));
        byte[] k = kv.getKey();
        writer.append(kv);
        keys[i] = k;
        values[i] = CellUtil.cloneValue(kv);
        keyStrSet.add(Bytes.toStringBinary(k));
        if (i > 0) {
          // Keys must be strictly increasing.
          assertTrue(
              (CellComparator.COMPARATOR.compare(kv, keys[i - 1], 0, keys[i - 1].length)) > 0);
        }
      }
      writer.close();
    }
    // Read back and verify trailer metadata and first/last keys.
    HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, conf);
    assertEquals(expectedNumLevels, reader.getTrailer().getNumDataIndexLevels());
    assertTrue(Bytes.equals(keys[0], ((KeyValue) reader.getFirstKey()).getKey()));
    assertTrue(Bytes.equals(keys[NUM_KV - 1], ((KeyValue) reader.getLastKey()).getKey()));
    LOG.info("Last key: " + Bytes.toStringBinary(keys[NUM_KV - 1]));
    // Seek to every key, forward then backward, with both stream and positional reads.
    for (boolean pread : new boolean[] { false, true }) {
      HFileScanner scanner = reader.getScanner(true, pread);
      for (int i = 0; i < NUM_KV; ++i) {
        checkSeekTo(keys, scanner, i);
        checkKeyValue("i=" + i, keys[i], values[i],
            ByteBuffer.wrap(((KeyValue) scanner.getKey()).getKey()), scanner.getValue());
      }
      assertTrue(scanner.seekTo());
      for (int i = NUM_KV - 1; i >= 0; --i) {
        checkSeekTo(keys, scanner, i);
        checkKeyValue("i=" + i, keys[i], values[i],
            ByteBuffer.wrap(((KeyValue) scanner.getKey()).getKey()), scanner.getValue());
      }
    }
    // Manually walk all blocks before load-on-open and validate leaf-index entries.
    HFile.Reader reader2 = reader;
    HFileBlock.FSReader fsReader = reader2.getUncachedBlockReader();
    HFileBlock.BlockIterator iter =
        fsReader.blockRange(0, reader.getTrailer().getLoadOnOpenDataOffset());
    HFileBlock block;
    List<byte[]> blockKeys = new ArrayList<byte[]>();
    while ((block = iter.nextBlock()) != null) {
      // Skip non-leaf-index blocks (data blocks etc.); was "return", which aborted the test.
      if (block.getBlockType() != BlockType.LEAF_INDEX) continue;
      ByteBuff b = block.getBufferReadOnly();
      int n = b.getIntAfterPosition(0);
      // One int for the entry count plus (n + 1) secondary-index offsets precede the entries.
      int entriesOffset = Bytes.SIZEOF_INT * (n + 2);
      for (int i = 0; i < n; ++i) {
        int keyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (i + 1));
        int nextKeyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (i + 2));
        int keyLen = nextKeyRelOffset - keyRelOffset;
        int keyOffset = b.arrayOffset() + entriesOffset + keyRelOffset
            + HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD;
        byte[] blockKey = Arrays.copyOfRange(b.array(), keyOffset, keyOffset + keyLen);
        String blockKeyStr = Bytes.toString(blockKey);
        blockKeys.add(blockKey);
        // If the leaf-index key is not one of the keys written, something is wrong.
        assertTrue("Invalid block key from leaf-level block: " + blockKeyStr,
            keyStrSet.contains(blockKeyStr));
      }
    }
    // Validate the midkey and the trailer's uncompressed index size.
    assertEquals(Bytes.toStringBinary(blockKeys.get((blockKeys.size() - 1) / 2)),
        reader.midkey());
    assertEquals(UNCOMPRESSED_INDEX_SIZES[testI],
        reader.getTrailer().getUncompressedDataIndexSize());
    reader.close();
    reader2.close();
  }
}

APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Checks if the HeapSize calculator is within reason: BlockIndexReader.heapSize() must
 * agree with the ClassSize-based estimate (minus the two array references the reader
 * holds but has not yet allocated).
 */
@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class cl = HFileBlockIndex.BlockIndexReader.class;
  long estimated = ClassSize.estimateBase(cl, false);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
  long reported = indexReader.heapSize();
  // Subtract the aligned cost of the two (still-null) array references.
  estimated -= ClassSize.align(2 * ClassSize.ARRAY);
  if (estimated != reported) {
    // On mismatch, recompute with debug output enabled and fail with details.
    estimated = ClassSize.estimateBase(cl, true);
    assertEquals(estimated, reported);
  }
}

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises BlockIndexChunk: at every size the serialized non-root and root forms must
 * match getNonRootSize()/getRootSize(), and getEntryBySubEntry must map every cumulative
 * sub-entry index back to the entry that contributed it.
 *
 * Bug fixed: {@code numSubEntriesAt[i]} was never populated, so every bound in the
 * verification loop was 0 and {@code getEntryBySubEntry} was never actually checked;
 * each entry's cumulative sub-entry count is now recorded.
 */
@Test
public void testBlockIndexChunk() throws IOException {
  BlockIndexChunk c = new BlockIndexChunk();
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  int N = 1000;
  int[] numSubEntriesAt = new int[N];
  int numSubEntries = 0;
  for (int i = 0; i < N; ++i) {
    // Check serialized sizes BEFORE adding the i-th entry so the empty chunk (i == 0)
    // is covered as well.
    baos.reset();
    DataOutputStream dos = new DataOutputStream(baos);
    c.writeNonRoot(dos);
    assertEquals(c.getNonRootSize(), dos.size());
    baos.reset();
    dos = new DataOutputStream(baos);
    c.writeRoot(dos);
    assertEquals(c.getRootSize(), dos.size());
    byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
    numSubEntries += rand.nextInt(5) + 1;
    numSubEntriesAt[i] = numSubEntries; // record cumulative count for the check below
    keys.add(k);
    c.add(k, getDummyFileOffset(i), getDummyOnDiskSize(i), numSubEntries);
  }
  // Every sub-entry index in [numSubEntriesAt[i-1], numSubEntriesAt[i]) maps to entry i.
  for (int i = 0; i < N; ++i) {
    for (int j = i == 0 ? 0 : numSubEntriesAt[i - 1]; j < numSubEntriesAt[i]; ++j) {
      assertEquals(i, c.getEntryBySubEntry(j));
    }
  }
}

Class: org.apache.hadoop.hbase.io.hfile.TestHFileBlockPositionalRead

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * When a single positional read satisfies only the necessary bytes and nothing more,
 * positionalReadWithExtra must report false (extra bytes were not obtained).
 */
@Test
public void testPositionalReadExtraFailed() throws IOException {
  final long position = 0;
  final int bufOffset = 0;
  final int necessaryLen = 10;
  final int extraLen = 5;
  final int totalLen = necessaryLen + extraLen;
  byte[] buf = new byte[totalLen];
  FSDataInputStream stream = mock(FSDataInputStream.class);
  // The only read returns just the necessary bytes, never the extra ones.
  when(stream.read(position, buf, bufOffset, totalLen)).thenReturn(necessaryLen);
  boolean gotExtra =
      HFileBlock.positionalReadWithExtra(stream, position, buf, bufOffset, necessaryLen, extraLen);
  assertFalse("Expect false return when reading extra bytes fails", gotExtra);
  verify(stream).read(position, buf, bufOffset, totalLen);
  verifyNoMoreInteractions(stream);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * When a single positional read delivers the necessary AND the extra bytes,
 * positionalReadWithExtra must report true.
 */
@Test
public void testPositionalReadExtraSucceeded() throws IOException {
  final long position = 0;
  final int bufOffset = 0;
  final int necessaryLen = 10;
  final int extraLen = 5;
  final int totalLen = necessaryLen + extraLen;
  byte[] buf = new byte[totalLen];
  FSDataInputStream stream = mock(FSDataInputStream.class);
  // One read returns everything, including the extra bytes.
  when(stream.read(position, buf, bufOffset, totalLen)).thenReturn(totalLen);
  boolean gotExtra =
      HFileBlock.positionalReadWithExtra(stream, position, buf, bufOffset, necessaryLen, extraLen);
  assertTrue("Expect true return when reading extra bytes succeeds", gotExtra);
  verify(stream).read(position, buf, bufOffset, totalLen);
  verifyNoMoreInteractions(stream);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * With zero extra bytes requested, positionalReadWithExtra must report false even when
 * the read fully succeeds (false means "no extra bytes were obtained").
 */
@Test
public void testPositionalReadNoExtra() throws IOException {
  final long position = 0;
  final int bufOffset = 0;
  final int necessaryLen = 10;
  final int extraLen = 0;
  final int totalLen = necessaryLen + extraLen;
  byte[] buf = new byte[totalLen];
  FSDataInputStream stream = mock(FSDataInputStream.class);
  when(stream.read(position, buf, bufOffset, totalLen)).thenReturn(totalLen);
  boolean gotExtra =
      HFileBlock.positionalReadWithExtra(stream, position, buf, bufOffset, necessaryLen, extraLen);
  assertFalse("Expect false return when no extra bytes requested", gotExtra);
  verify(stream).read(position, buf, bufOffset, totalLen);
  verifyNoMoreInteractions(stream);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A short first read followed by a retry that delivers the remaining necessary plus all
 * extra bytes must make positionalReadWithExtra report true.
 */
@Test
public void testPositionalReadShortReadCompletesNecessaryAndExtraBytes() throws IOException {
  final long position = 0;
  final int bufOffset = 0;
  final int necessaryLen = 10;
  final int extraLen = 5;
  final int totalLen = necessaryLen + extraLen;
  byte[] buf = new byte[totalLen];
  FSDataInputStream stream = mock(FSDataInputStream.class);
  // First read is short (5 of 15 bytes); the retry picks up the remaining 10.
  when(stream.read(position, buf, bufOffset, totalLen)).thenReturn(5);
  when(stream.read(5, buf, 5, 10)).thenReturn(10);
  boolean gotExtra =
      HFileBlock.positionalReadWithExtra(stream, position, buf, bufOffset, necessaryLen, extraLen);
  assertTrue("Expect true return when reading extra bytes succeeds", gotExtra);
  verify(stream).read(position, buf, bufOffset, totalLen);
  verify(stream).read(5, buf, 5, 10);
  verifyNoMoreInteractions(stream);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A short first read whose retry completes only the necessary bytes (no extra requested)
 * must make positionalReadWithExtra report false.
 */
@Test
public void testPositionalReadShortReadOfNecessaryBytes() throws IOException {
  final long position = 0;
  final int bufOffset = 0;
  final int necessaryLen = 10;
  final int extraLen = 0;
  final int totalLen = necessaryLen + extraLen;
  byte[] buf = new byte[totalLen];
  FSDataInputStream stream = mock(FSDataInputStream.class);
  // First read delivers 5 bytes; the retry delivers the remaining 5 necessary bytes.
  when(stream.read(position, buf, bufOffset, totalLen)).thenReturn(5);
  when(stream.read(5, buf, 5, 5)).thenReturn(5);
  boolean gotExtra =
      HFileBlock.positionalReadWithExtra(stream, position, buf, bufOffset, necessaryLen, extraLen);
  assertFalse("Expect false return when no extra bytes requested", gotExtra);
  verify(stream).read(position, buf, bufOffset, totalLen);
  verify(stream).read(5, buf, 5, 5);
  verifyNoMoreInteractions(stream);
}

Class: org.apache.hadoop.hbase.io.hfile.TestHFileEncryption

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Writes a single KV into an encrypted HFile and verifies the trailer carries an
 * encryption key and that the reader's crypto context matches the writer's (same cipher
 * name and same key bytes).
 */
@Test(timeout=20000) public void testHFileEncryptionMetadata() throws Exception {
  Configuration conf=TEST_UTIL.getConfiguration();
  CacheConfig cacheConf=new CacheConfig(conf);
  HFileContext fileContext=new HFileContextBuilder().withEncryptionContext(cryptoContext).build();
  Path path=new Path(TEST_UTIL.getDataTestDir(),"cryptometa.hfile");
  FSDataOutputStream out=fs.create(path);
  HFile.Writer writer=HFile.getWriterFactory(conf,cacheConf).withOutputStream(out).withFileContext(fileContext).create();
  try {
    KeyValue kv=new KeyValue("foo".getBytes(),"f1".getBytes(),null,"value".getBytes());
    writer.append(kv);
  } finally {
    writer.close();
    out.close();
  }
  HFile.Reader reader=HFile.createReader(fs,path,cacheConf,conf);
  try {
    reader.loadFileInfo();
    FixedFileTrailer trailer=reader.getTrailer();
    // An encryption key must have been persisted in the trailer.
    assertNotNull(trailer.getEncryptionKey());
    Encryption.Context readerContext=reader.getFileContext().getEncryptionContext();
    // Cipher and key read back must match what the file was written with.
    assertEquals(readerContext.getCipher().getName(),cryptoContext.getCipher().getName());
    assertTrue(Bytes.equals(readerContext.getKeyBytes(),cryptoContext.getKeyBytes()));
  } finally {
    reader.close();
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips 1000 randomly generated KVs through an encrypted HFile for every
 * (encoding, compression) combination: a full sequential scan must return exactly the
 * written KVs, and 100 random seeks must each land on the requested key.
 *
 * Fixes: two assertEquals calls had expected/actual swapped (JUnit convention is
 * message, expected, actual), producing misleading failure messages; the first finally
 * block closed the reader before the scanner and could NPE on a null scanner, masking
 * the original exception. Raw {@code List} was parameterized.
 */
@Test(timeout=6000000)
public void testHFileEncryption() throws Exception {
  RedundantKVGenerator generator = new RedundantKVGenerator();
  List<KeyValue> testKvs = generator.generateTestKeyValues(1000);
  Configuration conf = TEST_UTIL.getConfiguration();
  CacheConfig cacheConf = new CacheConfig(conf);
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    for (Compression.Algorithm compression : TestHFileBlock.COMPRESSION_ALGORITHMS) {
      HFileContext fileContext = new HFileContextBuilder().withBlockSize(4096)
          .withEncryptionContext(cryptoContext).withCompression(compression)
          .withDataBlockEncoding(encoding).build();
      LOG.info("Writing with " + fileContext);
      Path path = new Path(TEST_UTIL.getDataTestDir(), UUID.randomUUID().toString() + ".hfile");
      FSDataOutputStream out = fs.create(path);
      HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
          .withOutputStream(out).withFileContext(fileContext).create();
      try {
        for (KeyValue kv : testKvs) {
          writer.append(kv);
        }
      } finally {
        writer.close();
        out.close();
      }
      // Full sequential scan: every KV read back must be one that was written.
      LOG.info("Reading with " + fileContext);
      int i = 0;
      HFileScanner scanner = null;
      HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);
      try {
        reader.loadFileInfo();
        FixedFileTrailer trailer = reader.getTrailer();
        assertNotNull(trailer.getEncryptionKey());
        scanner = reader.getScanner(false, false);
        assertTrue("Initial seekTo failed", scanner.seekTo());
        do {
          Cell kv = scanner.getCell();
          assertTrue("Read back an unexpected or invalid KV",
              testKvs.contains(KeyValueUtil.ensureKeyValue(kv)));
          i++;
        } while (scanner.next());
      } finally {
        // scanner is still null if loadFileInfo() threw; close null-safely, scanner first.
        if (scanner != null) {
          scanner.close();
        }
        reader.close();
      }
      assertEquals("Did not read back as many KVs as written", testKvs.size(), i);
      // Random seeks with positional reads.
      LOG.info("Random seeking with " + fileContext);
      reader = HFile.createReader(fs, path, cacheConf, conf);
      try {
        scanner = reader.getScanner(false, true);
        assertTrue("Initial seekTo failed", scanner.seekTo());
        for (i = 0; i < 100; i++) {
          KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size()));
          // seekTo returns 0 on an exact match.
          assertEquals("Unable to find KV as expected: " + kv, 0, scanner.seekTo(kv));
        }
      } finally {
        scanner.close();
        reader.close();
      }
    }
  }
}

Class: org.apache.hadoop.hbase.io.hfile.TestLazyDataBlockDecompression

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * With lazy (compressed) block caching enabled, the same cache capacity must hold MORE
 * blocks and evict FEWER of them than with it disabled. Runs the same write/cache
 * workload twice — once with CACHE_DATA_BLOCKS_COMPRESSED off, once on — against a fresh
 * LruBlockCache of identical size, then compares block counts and eviction counts.
 * NOTE(review): relies on replacing CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE, i.e. on
 * global mutable state — presumably this test must not run concurrently with others.
 */
@Test public void testCompressionIncreasesEffectiveBlockCacheSize() throws Exception {
  // Cache sized so compressed blocks fit in noticeably greater numbers than unpacked ones.
  int maxSize=(int)(HConstants.DEFAULT_BLOCKSIZE * 2.1);
  Path hfilePath=new Path(TEST_UTIL.getDataTestDir(),"testCompressionIncreasesEffectiveBlockcacheSize");
  HFileContext context=new HFileContextBuilder().withCompression(Compression.Algorithm.GZ).build();
  LOG.info("context=" + context);
  // Pass 1: lazy compression disabled — blocks are cached unpacked.
  Configuration lazyCompressDisabled=HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  lazyCompressDisabled.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,cacheOnWrite);
  lazyCompressDisabled.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,cacheOnWrite);
  lazyCompressDisabled.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,cacheOnWrite);
  lazyCompressDisabled.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY,false);
  // Install a fresh global cache so counts start from zero.
  CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE=new LruBlockCache(maxSize,HConstants.DEFAULT_BLOCKSIZE,false,lazyCompressDisabled);
  CacheConfig cc=new CacheConfig(lazyCompressDisabled);
  assertFalse(cc.shouldCacheDataCompressed());
  assertTrue(cc.getBlockCache() instanceof LruBlockCache);
  LruBlockCache disabledBlockCache=(LruBlockCache)cc.getBlockCache();
  LOG.info("disabledBlockCache=" + disabledBlockCache);
  assertEquals("test inconsistency detected.",maxSize,disabledBlockCache.getMaxSize());
  assertTrue("eviction thread spawned unintentionally.",disabledBlockCache.getEvictionThread() == null);
  assertEquals("freshly created blockcache contains blocks.",0,disabledBlockCache.getBlockCount());
  writeHFile(lazyCompressDisabled,cc,fs,hfilePath,context,2000);
  cacheBlocks(lazyCompressDisabled,cc,fs,hfilePath,context);
  long disabledBlockCount=disabledBlockCache.getBlockCount();
  assertTrue("blockcache should contain blocks. disabledBlockCount=" + disabledBlockCount,disabledBlockCount > 0);
  long disabledEvictedCount=disabledBlockCache.getStats().getEvictedCount();
  // With lazy caching off, every cached block must already be unpacked.
  for ( Map.Entry e : disabledBlockCache.getMapForTests().entrySet()) {
    HFileBlock block=(HFileBlock)e.getValue().getBuffer();
    assertTrue("found a packed block, block=" + block,block.isUnpacked());
  }
  // Pass 2: lazy compression enabled — data blocks are cached in compressed form.
  Configuration lazyCompressEnabled=HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  lazyCompressEnabled.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,cacheOnWrite);
  lazyCompressEnabled.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,cacheOnWrite);
  lazyCompressEnabled.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,cacheOnWrite);
  lazyCompressEnabled.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY,true);
  CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE=new LruBlockCache(maxSize,HConstants.DEFAULT_BLOCKSIZE,false,lazyCompressEnabled);
  cc=new CacheConfig(lazyCompressEnabled);
  assertTrue("test improperly configured.",cc.shouldCacheDataCompressed());
  assertTrue(cc.getBlockCache() instanceof LruBlockCache);
  LruBlockCache enabledBlockCache=(LruBlockCache)cc.getBlockCache();
  LOG.info("enabledBlockCache=" + enabledBlockCache);
  assertEquals("test inconsistency detected",maxSize,enabledBlockCache.getMaxSize());
  assertTrue("eviction thread spawned unintentionally.",enabledBlockCache.getEvictionThread() == null);
  assertEquals("freshly created blockcache contains blocks.",0,enabledBlockCache.getBlockCount());
  cacheBlocks(lazyCompressEnabled,cc,fs,hfilePath,context);
  long enabledBlockCount=enabledBlockCache.getBlockCount();
  assertTrue("blockcache should contain blocks. enabledBlockCount=" + enabledBlockCount,enabledBlockCount > 0);
  long enabledEvictedCount=enabledBlockCache.getStats().getEvictedCount();
  int candidatesFound=0;
  // Every block category eligible for compressed caching must be cached packed.
  for ( Map.Entry e : enabledBlockCache.getMapForTests().entrySet()) {
    candidatesFound++;
    HFileBlock block=(HFileBlock)e.getValue().getBuffer();
    if (cc.shouldCacheCompressed(block.getBlockType().getCategory())) {
      assertFalse("found an unpacked block, block=" + block + ", block buffer capacity="+ block.getBufferWithoutHeader().capacity(),block.isUnpacked());
    }
  }
  assertTrue("did not find any candidates for compressed caching. Invalid test.",candidatesFound > 0);
  // Compressed caching must fit more blocks and evict fewer of them.
  LOG.info("disabledBlockCount=" + disabledBlockCount + ", enabledBlockCount="+ enabledBlockCount);
  assertTrue("enabling compressed data blocks should increase the effective cache size. " + "disabledBlockCount=" + disabledBlockCount + ", enabledBlockCount="+ enabledBlockCount,disabledBlockCount < enabledBlockCount);
  LOG.info("disabledEvictedCount=" + disabledEvictedCount + ", enabledEvictedCount="+ enabledEvictedCount);
  assertTrue("enabling compressed data blocks should reduce the number of evictions. " + "disabledEvictedCount=" + disabledEvictedCount + ", enabledEvictedCount="+ enabledEvictedCount,enabledEvictedCount < disabledEvictedCount);
}

Class: org.apache.hadoop.hbase.io.hfile.TestLruBlockCache

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Over-fills an LruBlockCache by one block and verifies that the background eviction
 * thread wakes up, performs at least one eviction, and the cache stabilizes. Uses polling
 * with bounded retries rather than fixed sleeps.
 */
@Test public void testBackgroundEvictionThread() throws Exception {
  long maxSize=100000;
  int numBlocks=9;
  long blockSize=calculateBlockSizeDefault(maxSize,numBlocks);
  assertTrue("calculateBlockSize appears broken.",blockSize * numBlocks <= maxSize);
  LruBlockCache cache=new LruBlockCache(maxSize,blockSize);
  EvictionThread evictionThread=cache.getEvictionThread();
  assertTrue(evictionThread != null);
  // One more block than fits, to force an eviction.
  CachedItem[] blocks=generateFixedBlocks(numBlocks + 1,blockSize,"block");
  // Wait until the eviction thread is actually inside run() before caching anything;
  // otherwise the evict request could be issued before the thread is listening.
  while (!evictionThread.isEnteringRun()) {
    Thread.sleep(1);
  }
  for ( CachedItem block : blocks) {
    cache.cacheBlock(block.cacheKey,block);
  }
  // Poll (at most ~4s) for the first eviction to happen.
  int n=0;
  while (cache.getStats().getEvictionCount() == 0) {
    Thread.sleep(200);
    assertTrue("Eviction never happened.",n++ < 20);
  }
  // Then wait for the block count to stop changing, i.e. the cache has stabilized.
  n=0;
  for (long prevCnt=0, curCnt=cache.getBlockCount(); prevCnt != curCnt; prevCnt=curCnt, curCnt=cache.getBlockCount()) {
    Thread.sleep(200);
    assertTrue("Cache never stabilized.",n++ < 20);
  }
  long evictionCount=cache.getStats().getEvictionCount();
  assertTrue(evictionCount >= 1);
  System.out.println("Background Evictions run: " + evictionCount);
}

InternalCallVerifier EqualityVerifier 
// Verifies CacheStats' rolling "past N periods" hit ratios with a 3-period window.
// Each rollMetricsPeriod() call closes the current period; the two getters then
// report the overall hit ratio and the caching-hit ratio aggregated over the last
// 3 closed periods, so early periods age out as new ones are rolled in.
// NOTE(review): the boolean arguments to hit()/miss() appear to select whether the
// access counts toward the "caching" ratio — confirm against CacheStats' signature.
@Test public void testPastNPeriodsMetrics() throws Exception { double delta=0.01; CacheStats stats=new CacheStats("test",3); stats.rollMetricsPeriod(); assertEquals(0.0,stats.getHitRatioPastNPeriods(),delta); assertEquals(0.0,stats.getHitCachingRatioPastNPeriods(),delta); stats.hit(false); stats.hit(true); stats.miss(false,false); stats.miss(false,false); stats.rollMetricsPeriod(); assertEquals(0.5,stats.getHitRatioPastNPeriods(),delta); assertEquals(1.0,stats.getHitCachingRatioPastNPeriods(),delta); stats.miss(true,false); stats.miss(false,false); stats.miss(false,false); stats.miss(false,false); stats.rollMetricsPeriod(); assertEquals(0.25,stats.getHitRatioPastNPeriods(),delta); assertEquals(0.5,stats.getHitCachingRatioPastNPeriods(),delta); stats.hit(false); stats.hit(true); stats.hit(false); stats.hit(true); stats.rollMetricsPeriod(); assertEquals(0.5,stats.getHitRatioPastNPeriods(),delta); assertEquals(0.75,stats.getHitCachingRatioPastNPeriods(),delta); stats.miss(true,false); stats.miss(true,false); stats.rollMetricsPeriod(); assertEquals(0.4,stats.getHitRatioPastNPeriods(),delta); assertEquals(0.4,stats.getHitCachingRatioPastNPeriods(),delta); stats.miss(true,false); stats.miss(true,false); stats.hit(false); stats.hit(false); stats.rollMetricsPeriod(); assertEquals(0.6,stats.getHitRatioPastNPeriods(),delta); assertEquals((double)1 / 3,stats.getHitCachingRatioPastNPeriods(),delta); stats.rollMetricsPeriod(); assertEquals((double)1 / 3,stats.getHitRatioPastNPeriods(),delta); assertEquals(0.0,stats.getHitCachingRatioPastNPeriods(),delta); stats.rollMetricsPeriod(); assertEquals(0.5,stats.getHitRatioPastNPeriods(),delta); assertEquals(0.0,stats.getHitCachingRatioPastNPeriods(),delta); stats.rollMetricsPeriod(); assertEquals(0.0,stats.getHitRatioPastNPeriods(),delta); assertEquals(0.0,stats.getHitCachingRatioPastNPeriods(),delta); stats.miss(true,false); stats.miss(false,false); stats.hit(true); stats.hit(false); stats.rollMetricsPeriod(); 
assertEquals(0.5,stats.getHitRatioPastNPeriods(),delta); assertEquals(0.5,stats.getHitCachingRatioPastNPeriods(),delta); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Caches more data than fits below the acceptable watermark and verifies that a
 * single eviction pass brings the cache back under its limits, dropping only the
 * least-recently-used (first) block.
 */
@Test
public void testCacheEvictionSimple() throws Exception {
  long maxSize = 100000;
  long blockSize = calculateBlockSizeDefault(maxSize, 10);
  LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false);
  CachedItem[] items = generateFixedBlocks(10, blockSize, "block");

  long expectedCacheSize = cache.heapSize();
  for (int i = 0; i < items.length; i++) {
    cache.cacheBlock(items[i].cacheKey, items[i]);
    expectedCacheSize += items[i].cacheBlockHeapSize();
  }

  // Exactly one eviction pass should have run.
  assertEquals(1, cache.getStats().getEvictionCount());

  // The raw total exceeds the acceptable watermark, but the cache stayed under it.
  assertTrue(expectedCacheSize > (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
  assertTrue(cache.heapSize() < maxSize);
  assertTrue(cache.heapSize() < (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));

  // The oldest block was evicted; every later block is still retrievable.
  assertTrue(cache.getBlock(items[0].cacheKey, true, false, true) == null);
  for (int i = 1; i < items.length; i++) {
    assertEquals(cache.getBlock(items[i].cacheKey, true, false, true), items[i]);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Two-priority eviction: the multi-access blocks are cached AND read back (promoting
// them to the multi priority bucket), then single-access blocks are cached without a
// read. One eviction pass should evict two blocks — the LRU block of each priority —
// leaving the remaining blocks of both groups retrievable.
// NOTE(review): the first loop accumulates block.cacheBlockHeapSize() while the second
// accumulates block.heapSize() — confirm this asymmetry is intentional; the expected
// size is only used for the acceptable-watermark assertion below.
@Test public void testCacheEvictionTwoPriorities() throws Exception { long maxSize=100000; long blockSize=calculateBlockSizeDefault(maxSize,10); LruBlockCache cache=new LruBlockCache(maxSize,blockSize,false); CachedItem[] singleBlocks=generateFixedBlocks(5,10000,"single"); CachedItem[] multiBlocks=generateFixedBlocks(5,10000,"multi"); long expectedCacheSize=cache.heapSize(); for ( CachedItem block : multiBlocks) { cache.cacheBlock(block.cacheKey,block); expectedCacheSize+=block.cacheBlockHeapSize(); assertEquals(cache.getBlock(block.cacheKey,true,false,true),block); } for ( CachedItem block : singleBlocks) { cache.cacheBlock(block.cacheKey,block); expectedCacheSize+=block.heapSize(); } assertEquals(cache.getStats().getEvictionCount(),1); assertEquals(cache.getStats().getEvictedCount(),2); assertTrue(expectedCacheSize > (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); assertTrue(cache.heapSize() <= maxSize); assertTrue(cache.heapSize() <= (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); assertTrue(cache.getBlock(singleBlocks[0].cacheKey,true,false,true) == null); assertTrue(cache.getBlock(multiBlocks[0].cacheKey,true,false,true) == null); for (int i=1; i < 4; i++) { assertEquals(cache.getBlock(singleBlocks[i].cacheKey,true,false,true),singleBlocks[i]); assertEquals(cache.getBlock(multiBlocks[i].cacheKey,true,false,true),multiBlocks[i]); } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
// Shrinking the cache: fills three priority buckets (single = cached only,
// multi = cached then read back, memory = cached in-memory) with 10 blocks each,
// then halves the max size via setMaxSize. A single eviction pass is expected to
// evict 15 blocks total — the 5 oldest from each priority — leaving indices 5-9
// of every group still retrievable.
@Test public void testResizeBlockCache() throws Exception { long maxSize=300000; long blockSize=calculateBlockSize(maxSize,31); LruBlockCache cache=new LruBlockCache(maxSize,blockSize,false,(int)Math.ceil(1.2 * maxSize / blockSize),LruBlockCache.DEFAULT_LOAD_FACTOR,LruBlockCache.DEFAULT_CONCURRENCY_LEVEL,0.98f,0.99f,0.33f,0.33f,0.34f,false,16 * 1024 * 1024); CachedItem[] singleBlocks=generateFixedBlocks(10,blockSize,"single"); CachedItem[] multiBlocks=generateFixedBlocks(10,blockSize,"multi"); CachedItem[] memoryBlocks=generateFixedBlocks(10,blockSize,"memory"); for (int i=0; i < 10; i++) { cache.cacheBlock(singleBlocks[i].cacheKey,singleBlocks[i]); cache.cacheBlock(multiBlocks[i].cacheKey,multiBlocks[i]); cache.getBlock(multiBlocks[i].cacheKey,true,false,true); cache.cacheBlock(memoryBlocks[i].cacheKey,memoryBlocks[i],true,false); } assertEquals(0,cache.getStats().getEvictionCount()); cache.setMaxSize((long)(maxSize * 0.5f)); assertEquals(1,cache.getStats().getEvictionCount()); assertEquals(15,cache.getStats().getEvictedCount()); for (int i=0; i < 5; i++) { assertEquals(null,cache.getBlock(singleBlocks[i].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(multiBlocks[i].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(memoryBlocks[i].cacheKey,true,false,true)); } for (int i=5; i < 10; i++) { assertEquals(singleBlocks[i],cache.getBlock(singleBlocks[i].cacheKey,true,false,true)); assertEquals(multiBlocks[i],cache.getBlock(multiBlocks[i].cacheKey,true,false,true)); assertEquals(memoryBlocks[i],cache.getBlock(memoryBlocks[i].cacheKey,true,false,true)); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Scan resistance: pre-loads 5 blocks that are read back (multi priority), then
// streams in single-access blocks as a scan would. The assertions check that the
// scan traffic triggers evictions in the expected counts and that a mix of the
// scan blocks and the oldest multi blocks is evicted, rather than the scan wiping
// out the whole working set. Final block count is pinned to 7.
@Test public void testScanResistance() throws Exception { long maxSize=100000; long blockSize=calculateBlockSize(maxSize,10); LruBlockCache cache=new LruBlockCache(maxSize,blockSize,false,(int)Math.ceil(1.2 * maxSize / blockSize),LruBlockCache.DEFAULT_LOAD_FACTOR,LruBlockCache.DEFAULT_CONCURRENCY_LEVEL,0.66f,0.99f,0.33f,0.33f,0.34f,false,16 * 1024 * 1024); CachedItem[] singleBlocks=generateFixedBlocks(20,blockSize,"single"); CachedItem[] multiBlocks=generateFixedBlocks(5,blockSize,"multi"); for ( CachedItem block : multiBlocks) { cache.cacheBlock(block.cacheKey,block); cache.getBlock(block.cacheKey,true,false,true); } for (int i=0; i < 5; i++) { cache.cacheBlock(singleBlocks[i].cacheKey,singleBlocks[i]); } assertEquals(1,cache.getStats().getEvictionCount()); assertEquals(4,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(singleBlocks[0].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(singleBlocks[1].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(multiBlocks[0].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(multiBlocks[1].cacheKey,true,false,true)); for (int i=5; i < 18; i++) { cache.cacheBlock(singleBlocks[i].cacheKey,singleBlocks[i]); } assertEquals(4,cache.getStats().getEvictionCount()); assertEquals(16,cache.getStats().getEvictedCount()); assertEquals(7,cache.getBlockCount()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Three-priority eviction, step by step: fills single/multi/memory buckets with 3
// blocks each, then adds one block at a time and pins the exact eviction count,
// evicted-block count, and which specific block was chosen as the victim after
// each insertion. The "big" blocks (3x block size) each force multiple evictions
// per pass. Statement order is load-bearing: every getBlock() call both asserts
// and changes the access history used by subsequent evictions.
@Test public void testCacheEvictionThreePriorities() throws Exception { long maxSize=100000; long blockSize=calculateBlockSize(maxSize,10); LruBlockCache cache=new LruBlockCache(maxSize,blockSize,false,(int)Math.ceil(1.2 * maxSize / blockSize),LruBlockCache.DEFAULT_LOAD_FACTOR,LruBlockCache.DEFAULT_CONCURRENCY_LEVEL,0.98f,0.99f,0.33f,0.33f,0.34f,false,16 * 1024 * 1024); CachedItem[] singleBlocks=generateFixedBlocks(5,blockSize,"single"); CachedItem[] multiBlocks=generateFixedBlocks(5,blockSize,"multi"); CachedItem[] memoryBlocks=generateFixedBlocks(5,blockSize,"memory"); long expectedCacheSize=cache.heapSize(); for (int i=0; i < 3; i++) { cache.cacheBlock(singleBlocks[i].cacheKey,singleBlocks[i]); expectedCacheSize+=singleBlocks[i].cacheBlockHeapSize(); cache.cacheBlock(multiBlocks[i].cacheKey,multiBlocks[i]); expectedCacheSize+=multiBlocks[i].cacheBlockHeapSize(); cache.getBlock(multiBlocks[i].cacheKey,true,false,true); cache.cacheBlock(memoryBlocks[i].cacheKey,memoryBlocks[i],true,false); expectedCacheSize+=memoryBlocks[i].cacheBlockHeapSize(); } assertEquals(0,cache.getStats().getEvictionCount()); assertEquals(expectedCacheSize,cache.heapSize()); cache.cacheBlock(singleBlocks[3].cacheKey,singleBlocks[3]); assertEquals(1,cache.getStats().getEvictionCount()); assertEquals(1,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(singleBlocks[0].cacheKey,true,false,true)); cache.getBlock(singleBlocks[1].cacheKey,true,false,true); cache.cacheBlock(singleBlocks[4].cacheKey,singleBlocks[4]); assertEquals(2,cache.getStats().getEvictionCount()); assertEquals(2,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(multiBlocks[0].cacheKey,true,false,true)); cache.cacheBlock(memoryBlocks[3].cacheKey,memoryBlocks[3],true,false); assertEquals(3,cache.getStats().getEvictionCount()); assertEquals(3,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(memoryBlocks[0].cacheKey,true,false,true)); CachedItem[] 
bigBlocks=generateFixedBlocks(3,blockSize * 3,"big"); cache.cacheBlock(bigBlocks[0].cacheKey,bigBlocks[0]); assertEquals(4,cache.getStats().getEvictionCount()); assertEquals(6,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(singleBlocks[2].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(singleBlocks[3].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(singleBlocks[4].cacheKey,true,false,true)); cache.getBlock(bigBlocks[0].cacheKey,true,false,true); cache.cacheBlock(bigBlocks[1].cacheKey,bigBlocks[1]); assertEquals(5,cache.getStats().getEvictionCount()); assertEquals(9,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(singleBlocks[1].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(multiBlocks[1].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(multiBlocks[2].cacheKey,true,false,true)); cache.cacheBlock(bigBlocks[2].cacheKey,bigBlocks[2],true,false); assertEquals(6,cache.getStats().getEvictionCount()); assertEquals(12,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(memoryBlocks[1].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(memoryBlocks[2].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(memoryBlocks[3].cacheKey,true,false,true)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Basic cache round-trip: misses before insertion, exact heap-size accounting after
// insertion, successful retrieval of every block, and — the key assertion — that
// re-caching an already-cached key is ignored (block count and heap size unchanged).
// No evictions are expected (101 slots for 100 blocks). Finishes by running the
// StatisticsThread once to make sure stats logging doesn't throw.
@Test public void testCacheSimple() throws Exception { long maxSize=1000000; long blockSize=calculateBlockSizeDefault(maxSize,101); LruBlockCache cache=new LruBlockCache(maxSize,blockSize); CachedItem[] blocks=generateRandomBlocks(100,blockSize); long expectedCacheSize=cache.heapSize(); for ( CachedItem block : blocks) { assertTrue(cache.getBlock(block.cacheKey,true,false,true) == null); } for ( CachedItem block : blocks) { cache.cacheBlock(block.cacheKey,block); expectedCacheSize+=block.cacheBlockHeapSize(); } assertEquals(expectedCacheSize,cache.heapSize()); for ( CachedItem block : blocks) { HeapSize buf=cache.getBlock(block.cacheKey,true,false,true); assertTrue(buf != null); assertEquals(buf.heapSize(),block.heapSize()); } long expectedBlockCount=cache.getBlockCount(); for ( CachedItem block : blocks) { cache.cacheBlock(block.cacheKey,block); } assertEquals("Cache should ignore cache requests for blocks already in cache",expectedBlockCount,cache.getBlockCount()); assertEquals(expectedCacheSize,cache.heapSize()); for ( CachedItem block : blocks) { HeapSize buf=cache.getBlock(block.cacheKey,true,false,true); assertTrue(buf != null); assertEquals(buf.heapSize(),block.heapSize()); } assertEquals(0,cache.getStats().getEvictionCount()); Thread t=new LruBlockCache.StatisticsThread(cache); t.start(); t.join(); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that blocks larger than the configured per-block limit (1024 bytes here)
 * are rejected at insert time, while smaller blocks are cached normally and each
 * rejection is counted as a failed insert.
 */
@Test
public void testMaxBlockSize() throws Exception {
  long maxSize = 100000;
  long blockSize = calculateBlockSize(maxSize, 10);
  LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false,
      (int) Math.ceil(1.2 * maxSize / blockSize), LruBlockCache.DEFAULT_LOAD_FACTOR,
      LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, 0.99f, 0.33f, 0.33f, 0.34f, false, 1024);

  // 1024 + 5 bytes exceeds the 1024-byte per-block limit; 600 bytes does not.
  CachedItem[] oversized = generateFixedBlocks(10, 1024 + 5, "long");
  CachedItem[] undersized = generateFixedBlocks(15, 600, "small");

  for (CachedItem item : oversized) {
    cache.cacheBlock(item.cacheKey, item);
  }
  for (CachedItem item : undersized) {
    cache.cacheBlock(item.cacheKey, item);
  }

  // Only the 15 small blocks made it in; every small one is retrievable,
  // every oversized one is absent and counted as a failed insert.
  assertEquals(15, cache.getBlockCount());
  for (CachedItem item : undersized) {
    assertNotNull(cache.getBlock(item.cacheKey, true, false, false));
  }
  for (CachedItem item : oversized) {
    assertNull(cache.getBlock(item.cacheKey, true, false, false));
  }
  assertEquals(10, cache.getStats().getFailedInserts());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// In-memory force mode: with forceInMemory=true, in-memory blocks displace single-
// and multi-priority blocks one for one as they are cached, in strict LRU order
// within each displaced priority. The exact eviction/evicted counts and victims are
// pinned after every insertion. The final step shows the flip side: once the cache
// is saturated with in-memory blocks, a newly cached single-priority block
// (singleBlocks[9]) is itself evicted immediately and cannot be read back.
@Test public void testCacheEvictionInMemoryForceMode() throws Exception { long maxSize=100000; long blockSize=calculateBlockSize(maxSize,10); LruBlockCache cache=new LruBlockCache(maxSize,blockSize,false,(int)Math.ceil(1.2 * maxSize / blockSize),LruBlockCache.DEFAULT_LOAD_FACTOR,LruBlockCache.DEFAULT_CONCURRENCY_LEVEL,0.98f,0.99f,0.2f,0.3f,0.5f,true,16 * 1024 * 1024); CachedItem[] singleBlocks=generateFixedBlocks(10,blockSize,"single"); CachedItem[] multiBlocks=generateFixedBlocks(10,blockSize,"multi"); CachedItem[] memoryBlocks=generateFixedBlocks(10,blockSize,"memory"); long expectedCacheSize=cache.heapSize(); for (int i=0; i < 4; i++) { cache.cacheBlock(singleBlocks[i].cacheKey,singleBlocks[i]); expectedCacheSize+=singleBlocks[i].cacheBlockHeapSize(); cache.cacheBlock(multiBlocks[i].cacheKey,multiBlocks[i]); expectedCacheSize+=multiBlocks[i].cacheBlockHeapSize(); cache.getBlock(multiBlocks[i].cacheKey,true,false,true); } cache.cacheBlock(singleBlocks[4].cacheKey,singleBlocks[4]); expectedCacheSize+=singleBlocks[4].cacheBlockHeapSize(); assertEquals(0,cache.getStats().getEvictionCount()); assertEquals(expectedCacheSize,cache.heapSize()); cache.cacheBlock(memoryBlocks[0].cacheKey,memoryBlocks[0],true,false); assertEquals(1,cache.getStats().getEvictionCount()); assertEquals(1,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(singleBlocks[0].cacheKey,true,false,true)); cache.cacheBlock(memoryBlocks[1].cacheKey,memoryBlocks[1],true,false); assertEquals(2,cache.getStats().getEvictionCount()); assertEquals(2,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(singleBlocks[1].cacheKey,true,false,true)); cache.cacheBlock(memoryBlocks[2].cacheKey,memoryBlocks[2],true,false); cache.cacheBlock(memoryBlocks[3].cacheKey,memoryBlocks[3],true,false); cache.cacheBlock(memoryBlocks[4].cacheKey,memoryBlocks[4],true,false); cache.cacheBlock(memoryBlocks[5].cacheKey,memoryBlocks[5],true,false); 
assertEquals(6,cache.getStats().getEvictionCount()); assertEquals(6,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(singleBlocks[2].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(singleBlocks[3].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(multiBlocks[0].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(multiBlocks[1].cacheKey,true,false,true)); cache.cacheBlock(memoryBlocks[6].cacheKey,memoryBlocks[6],true,false); cache.cacheBlock(memoryBlocks[7].cacheKey,memoryBlocks[7],true,false); cache.cacheBlock(memoryBlocks[8].cacheKey,memoryBlocks[8],true,false); assertEquals(9,cache.getStats().getEvictionCount()); assertEquals(9,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(singleBlocks[4].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(multiBlocks[2].cacheKey,true,false,true)); assertEquals(null,cache.getBlock(multiBlocks[3].cacheKey,true,false,true)); cache.cacheBlock(memoryBlocks[9].cacheKey,memoryBlocks[9],true,false); assertEquals(10,cache.getStats().getEvictionCount()); assertEquals(10,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(memoryBlocks[0].cacheKey,true,false,true)); cache.cacheBlock(singleBlocks[9].cacheKey,singleBlocks[9]); assertEquals(11,cache.getStats().getEvictionCount()); assertEquals(11,cache.getStats().getEvictedCount()); assertEquals(null,cache.getBlock(singleBlocks[9].cacheKey,true,false,true)); }

Class: org.apache.hadoop.hbase.io.hfile.TestLruCachedBlock

InternalCallVerifier EqualityVerifier 
/**
 * Equal blocks must agree on both equals() and hashCode(); unequal blocks must
 * differ on both.
 */
@Test
public void testEquality() {
  // equals() contract on the object pairs themselves...
  assertEquals(block, blockEqual);
  assertNotEquals(block, blockNotEqual);
  // ...and the matching hashCode() behavior.
  assertEquals(block.hashCode(), blockEqual.hashCode());
  assertNotEquals(block.hashCode(), blockNotEqual.hashCode());
}

Class: org.apache.hadoop.hbase.io.hfile.TestScannerFromBucketCache

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Scans a region backed by an off-heap bucket cache (multi-byte-buffer path). The
 * first scan runs before blocks reach the bucket cache, so cells must be on-heap and
 * non-shareable; after a short delay for the cache flush, a second scan must return
 * shared, off-heap-backed cells (index 5 excepted).
 */
@Test
public void testBasicScanWithOffheapBucketCacheWithMBB() throws IOException {
  setUp(true, true);
  byte[] row1 = Bytes.toBytes("row1offheap");
  byte[] qf1 = Bytes.toBytes("qualifier1");
  byte[] qf2 = Bytes.toBytes("qualifier2");
  byte[] fam1 = Bytes.toBytes("famoffheap");
  long ts1 = 1;
  long ts2 = ts1 + 1;
  long ts3 = ts1 + 2;
  String method = this.getName();
  this.region = initHRegion(tableName, method, conf, test_util, fam1);
  try {
    List expected = insertData(row1, qf1, qf2, fam1, ts1, ts2, ts3, true);
    List actual = performScan(row1, fam1);
    // Fresh data is still on-heap: nothing should be off-heap or shareable yet.
    for (int i = 0; i < expected.size(); i++) {
      assertFalse(actual.get(i) instanceof OffheapKeyValue);
      assertFalse(actual.get(i) instanceof ShareableMemory);
      assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i)));
    }
    // Give the blocks time to be flushed into the off-heap bucket cache.
    Thread.sleep(500);
    Scan scan = new Scan(row1);
    scan.addFamily(fam1);
    scan.setMaxVersions(10);
    actual = new ArrayList();
    InternalScanner scanner = region.getScanner(scan, false);
    boolean hasNext = scanner.next(actual);
    assertEquals(false, hasNext);
    // Second scan is served from the bucket cache: cells are shareable, and all
    // but index 5 are backed by off-heap memory.
    for (int i = 0; i < expected.size(); i++) {
      if (i != 5) {
        assertTrue(actual.get(i) instanceof OffheapKeyValue);
      }
      assertTrue(actual.get(i) instanceof ShareableMemory);
    }
  } catch (InterruptedException e) {
    // FIX: previously the interrupt was swallowed silently; restore the thread's
    // interrupt status so the test runner can observe the interruption.
    Thread.currentThread().interrupt();
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

Class: org.apache.hadoop.hbase.io.hfile.TestScannerSelectionUsingKeyRange

InternalCallVerifier EqualityVerifier 
// Scanner selection by key range: writes NUM_FILES store files (compaction disabled
// via a huge compactionThreshold), then scans an empty key range ["aaa","aaz").
// The scan must return no rows, and the number of distinct HFiles whose blocks were
// touched in the cleared block cache must equal expectedCount — i.e. only the store
// files whose key range overlaps the scan were opened.
@Test public void testScannerSelection() throws IOException { Configuration conf=TEST_UTIL.getConfiguration(); conf.setInt("hbase.hstore.compactionThreshold",10000); HColumnDescriptor hcd=new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true).setBloomFilterType(bloomType); HTableDescriptor htd=new HTableDescriptor(TABLE); htd.addFamily(hcd); HRegionInfo info=new HRegionInfo(TABLE); Region region=HBaseTestingUtility.createRegionAndWAL(info,TEST_UTIL.getDataTestDir(),conf,htd); for (int iFile=0; iFile < NUM_FILES; ++iFile) { for (int iRow=0; iRow < NUM_ROWS; ++iRow) { Put put=new Put(Bytes.toBytes("row" + iRow)); for (int iCol=0; iCol < NUM_COLS_PER_ROW; ++iCol) { put.addColumn(FAMILY_BYTES,Bytes.toBytes("col" + iCol),Bytes.toBytes("value" + iFile + "_"+ iRow+ "_"+ iCol)); } region.put(put); } region.flush(true); } Scan scan=new Scan(Bytes.toBytes("aaa"),Bytes.toBytes("aaz")); CacheConfig.blockCacheDisabled=false; CacheConfig cacheConf=new CacheConfig(conf); LruBlockCache cache=(LruBlockCache)cacheConf.getBlockCache(); cache.clearCache(); InternalScanner scanner=region.getScanner(scan); List results=new ArrayList(); while (scanner.next(results)) { } scanner.close(); assertEquals(0,results.size()); Set accessedFiles=cache.getCachedFileNamesForTest(); assertEquals(expectedCount,accessedFiles.size()); HBaseTestingUtility.closeRegionAndWAL(region); }

Class: org.apache.hadoop.hbase.io.hfile.TestSeekBeforeWithInlineBlocks

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Iterates the full matrix of {HFile version} x {BloomType} x {index chunk size}:
// writes NUM_KV random cells through a StoreFile.Writer, reopens the file, asserts
// the expected index depth, then for both pread and streaming scanners walks
// seekBefore() forward and backward over every key, checking the returned cell and
// that seekBefore() on the first key correctly reports "nothing before".
/** * Scanner.seekBefore() could fail because when seeking to a previous HFile data block, it needs * to know the size of that data block, which it calculates using current data block offset and * the previous data block offset. This fails to work when there are leaf-level index blocks in * the scannable section of the HFile, i.e. starting in HFileV2. This test will try seekBefore() * on a flat (single-level) and multi-level (2,3) HFile and confirm this bug is now fixed. This * bug also happens for inline Bloom blocks for the same reasons. */ @Test public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException { conf=TEST_UTIL.getConfiguration(); for (int hfileVersion=HFile.MIN_FORMAT_VERSION_WITH_TAGS; hfileVersion <= HFile.MAX_FORMAT_VERSION; hfileVersion++) { conf.setInt(HFile.FORMAT_VERSION_KEY,hfileVersion); fs=HFileSystem.get(conf); for ( BloomType bloomType : BloomType.values()) { for (int testI=0; testI < INDEX_CHUNK_SIZES.length; testI++) { int indexBlockSize=INDEX_CHUNK_SIZES[testI]; int expectedNumLevels=EXPECTED_NUM_LEVELS[testI]; LOG.info(String.format("Testing HFileVersion: %s, BloomType: %s, Index Levels: %s",hfileVersion,bloomType,expectedNumLevels)); conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY,indexBlockSize); conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,BLOOM_BLOCK_SIZE); Cell[] cells=new Cell[NUM_KV]; Path hfilePath=new Path(TEST_UTIL.getDataTestDir(),String.format("testMultiIndexLevelRandomHFileWithBlooms-%s-%s-%s",hfileVersion,bloomType,testI)); conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,0.0f); CacheConfig cacheConf=new CacheConfig(conf); { HFileContext meta=new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build(); StoreFile.Writer storeFileWriter=new StoreFile.WriterBuilder(conf,cacheConf,fs).withFilePath(hfilePath).withFileContext(meta).withBloomType(bloomType).build(); for (int i=0; i < NUM_KV; i++) { byte[] row=RandomKeyValueUtil.randomOrderedKey(RAND,i); byte[] 
qual=RandomKeyValueUtil.randomRowOrQualifier(RAND); byte[] value=RandomKeyValueUtil.randomValue(RAND); KeyValue kv=new KeyValue(row,FAM,qual,value); storeFileWriter.append(kv); cells[i]=kv; } storeFileWriter.close(); } HFile.Reader reader=HFile.createReader(fs,hfilePath,cacheConf,conf); assertEquals(expectedNumLevels,reader.getTrailer().getNumDataIndexLevels()); for ( boolean pread : new boolean[]{false,true}) { HFileScanner scanner=reader.getScanner(true,pread); checkNoSeekBefore(cells,scanner,0); for (int i=1; i < NUM_KV; i++) { checkSeekBefore(cells,scanner,i); checkCell(cells[i - 1],scanner.getCell()); } assertTrue(scanner.seekTo()); for (int i=NUM_KV - 1; i >= 1; i--) { checkSeekBefore(cells,scanner,i); checkCell(cells[i - 1],scanner.getCell()); } checkNoSeekBefore(cells,scanner,0); scanner.close(); } reader.close(); } } } }

Class: org.apache.hadoop.hbase.io.hfile.bucket.TestBucketCache

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Regression test for a leak in the eviction/re-cache race: the test holds the write
// lock for the block's offset so a background evictBlock() thread parks waiting on
// it, then simulates the eviction completing out-of-band (blockEvicted + remove from
// backingMap) and re-caches the same key before releasing the lock. After the evict
// thread finishes, exactly one live block must remain — the stalled eviction must
// not have deleted the re-cached block's backing storage.
@Test public void testMemoryLeak() throws Exception { final BlockCacheKey cacheKey=new BlockCacheKey("dummy",1L); cacheAndWaitUntilFlushedToBucket(cache,cacheKey,new CacheTestUtils.ByteArrayCacheable(new byte[10])); long lockId=cache.backingMap.get(cacheKey).offset(); ReentrantReadWriteLock lock=cache.offsetLock.getLock(lockId); lock.writeLock().lock(); Thread evictThread=new Thread("evict-block"){ @Override public void run(){ cache.evictBlock(cacheKey); } } ; evictThread.start(); cache.offsetLock.waitForWaiters(lockId,1); cache.blockEvicted(cacheKey,cache.backingMap.remove(cacheKey),true); cacheAndWaitUntilFlushedToBucket(cache,cacheKey,new CacheTestUtils.ByteArrayCacheable(new byte[10])); lock.writeLock().unlock(); evictThread.join(); assertEquals(1L,cache.getBlockCount()); assertTrue(cache.getCurrentSize() > 0L); assertTrue("We should have a block!",cache.iterator().hasNext()); }

InternalCallVerifier EqualityVerifier 
// Exercises BucketAllocator to exhaustion: allocates random block sizes from the
// candidate set until every size class throws CacheFullException (sizes are removed
// from the candidate list as they fill). Then checks each size class reports zero
// free slots, frees every allocation (freeBlock must return the allocated size),
// and finally verifies the allocator reports no used space.
@Test public void testBucketAllocator() throws BucketAllocatorException { BucketAllocator mAllocator=cache.getAllocator(); final List BLOCKSIZES=Arrays.asList(4 * 1024,8 * 1024,64 * 1024,96 * 1024); boolean full=false; ArrayList allocations=new ArrayList(); List tmp=new ArrayList(BLOCKSIZES); while (!full) { Integer blockSize=null; try { blockSize=randFrom(tmp); allocations.add(mAllocator.allocateBlock(blockSize)); } catch ( CacheFullException cfe) { tmp.remove(blockSize); if (tmp.isEmpty()) full=true; } } for ( Integer blockSize : BLOCKSIZES) { BucketSizeInfo bucketSizeInfo=mAllocator.roundUpToBucketSizeInfo(blockSize); IndexStatistics indexStatistics=bucketSizeInfo.statistics(); assertEquals("unexpected freeCount for " + bucketSizeInfo,0,indexStatistics.freeCount()); } for ( long offset : allocations) { assertEquals(mAllocator.sizeOfAllocation(offset),mAllocator.freeBlock(offset)); } assertEquals(0,mAllocator.getUsedSize()); }

Class: org.apache.hadoop.hbase.io.hfile.bucket.TestFileIOEngine

IterativeVerifier InternalCallVerifier BooleanVerifier 
// Round-trips 50 random byte ranges through a file-backed IO engine: writes a
// random-length buffer at a random in-bounds offset, reads it back through the
// deserializer, and compares byte-for-byte. The finally block deletes the backing
// file so the test leaves no residue.
// NOTE(review): the engine itself is never shut down/closed before the file is
// deleted — confirm FileIOEngine does not hold an open handle that should be
// released explicitly.
@Test public void testFileIOEngine() throws IOException { int size=2 * 1024 * 1024; String filePath="testFileIOEngine"; try { FileIOEngine fileIOEngine=new FileIOEngine(filePath,size); for (int i=0; i < 50; i++) { int len=(int)Math.floor(Math.random() * 100); long offset=(long)Math.floor(Math.random() * size % (size - len)); byte[] data1=new byte[len]; for (int j=0; j < data1.length; ++j) { data1[j]=(byte)(Math.random() * 255); } fileIOEngine.write(ByteBuffer.wrap(data1),offset); BufferGrabbingDeserializer deserializer=new BufferGrabbingDeserializer(); fileIOEngine.read(offset,len,deserializer); ByteBuff data2=deserializer.getDeserializedByteBuff(); for (int j=0; j < data1.length; ++j) { assertTrue(data1[j] == data2.get(j)); } } } finally { File file=new File(filePath); if (file.exists()) { file.delete(); } } }

Class: org.apache.hadoop.hbase.io.util.TestLRUDictionary

InternalCallVerifier EqualityVerifier 
/**
 * Neither lookup nor insertion may accept an empty byte array: both must report
 * NOT_IN_DICTIONARY.
 */
@Test
public void testPassingEmptyArrayToFindEntry() {
  byte[] empty = HConstants.EMPTY_BYTE_ARRAY;
  assertEquals(Dictionary.NOT_IN_DICTIONARY, testee.findEntry(empty, 0, 0));
  assertEquals(Dictionary.NOT_IN_DICTIONARY, testee.addEntry(empty, 0, 0));
}

IterativeVerifier InternalCallVerifier BooleanVerifier 
// LRU eviction order of the dictionary: fills it with Short.MAX_VALUE entries, then
// checks that touching entry 0 keeps it resident, that a brand-new entry (based on
// Integer.MAX_VALUE) first misses and then is resident, that every other original
// entry (1..Short.MAX_VALUE-1) has been displaced by the refill that follows, and
// that re-adding them all succeeds. findEntry's return of -1 signals "not present".
// NOTE(review): the misses here appear to insert the probed value as a side effect
// (the MAX_VALUE probe misses once then hits) — confirm against LRUDictionary.
@Test public void TestLRUPolicy(){ for (int i=0; i < Short.MAX_VALUE; i++) { testee.findEntry((BigInteger.valueOf(i)).toByteArray(),0,(BigInteger.valueOf(i)).toByteArray().length); } assertTrue(testee.findEntry(BigInteger.ZERO.toByteArray(),0,BigInteger.ZERO.toByteArray().length) != -1); assertTrue(testee.findEntry(BigInteger.valueOf(Integer.MAX_VALUE).toByteArray(),0,BigInteger.valueOf(Integer.MAX_VALUE).toByteArray().length) == -1); assertTrue(testee.findEntry(BigInteger.valueOf(Integer.MAX_VALUE).toByteArray(),0,BigInteger.valueOf(Integer.MAX_VALUE).toByteArray().length) != -1); assertTrue(testee.findEntry(BigInteger.ZERO.toByteArray(),0,BigInteger.ZERO.toByteArray().length) != -1); for (int i=1; i < Short.MAX_VALUE; i++) { assertTrue(testee.findEntry(BigInteger.valueOf(i).toByteArray(),0,BigInteger.valueOf(i).toByteArray().length) == -1); } for (int i=0; i < Short.MAX_VALUE; i++) { assertTrue(testee.findEntry(BigInteger.valueOf(i).toByteArray(),0,BigInteger.valueOf(i).toByteArray().length) != -1); } }

InternalCallVerifier BooleanVerifier 
/**
 * Re-adding the same byte array must hand out a fresh index on every call — the
 * index returned by the first addEntry must never be reused.
 */
@Test
public void testPassingSameArrayToAddEntry() {
  byte[] family = HConstants.CATALOG_FAMILY;
  int len = family.length;
  int firstIndex = testee.addEntry(family, 0, len);
  int secondIndex = testee.addEntry(family, 0, len);
  int thirdIndex = testee.addEntry(family, 0, len);
  assertFalse(firstIndex == secondIndex);
  assertFalse(firstIndex == thirdIndex);
}

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Dictionary round-trip with random bytes: the first findEntry misses (-1), yet the
// dictionary is then non-empty and the second findEntry hits — so the miss appears
// to insert the probed bytes as a side effect (NOTE(review): confirm against the
// Dictionary implementation). Also checks that the same bytes at a different offset
// within a larger array resolve to the same index, that getEntry returns the exact
// bytes, and that clear() empties the dictionary.
@Test public void testBasic(){ Random rand=new Random(); byte[] testBytes=new byte[10]; rand.nextBytes(testBytes); assertEquals(testee.findEntry(testBytes,0,testBytes.length),-1); assertFalse(isDictionaryEmpty(testee)); short t=testee.findEntry(testBytes,0,testBytes.length); assertTrue(t != -1); byte[] testBytesCopy=new byte[20]; Bytes.putBytes(testBytesCopy,10,testBytes,0,testBytes.length); assertEquals(testee.findEntry(testBytesCopy,10,testBytes.length),t); assertTrue(Arrays.equals(testBytes,testee.getEntry(t))); testee.clear(); assertTrue(isDictionaryEmpty(testee)); }

Class: org.apache.hadoop.hbase.ipc.AbstractTestIPC

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Round-trips a 3-cell cell block through an RPC echo call with the Gzip cell-block
// codec configured, and asserts every cell comes back equal and the count matches.
// Client and server are both torn down in the finally block.
/** * It is hard to verify the compression is actually happening under the wraps. Hope that if * unsupported, we'll get an exception out of some time (meantime, have to trace it manually to * confirm that compression is happening down in the client and server). * @throws IOException * @throws InterruptedException * @throws SecurityException * @throws NoSuchMethodException */ @Test public void testCompressCellBlock() throws IOException, InterruptedException, SecurityException, NoSuchMethodException, ServiceException { Configuration conf=new Configuration(HBaseConfiguration.create()); conf.set("hbase.client.rpc.compressor",GzipCodec.class.getCanonicalName()); List cells=new ArrayList(); int count=3; for (int i=0; i < count; i++) { cells.add(CELL); } AbstractRpcClient client=createRpcClient(conf); TestRpcServer rpcServer=new TestRpcServer(); try { rpcServer.start(); MethodDescriptor md=SERVICE.getDescriptorForType().findMethodByName("echo"); EchoRequestProto param=EchoRequestProto.newBuilder().setMessage("hello").build(); PayloadCarryingRpcController pcrc=new PayloadCarryingRpcController(CellUtil.createCellScanner(cells)); InetSocketAddress address=rpcServer.getListenerAddress(); if (address == null) { throw new IOException("Listener channel is closed"); } Pair r=client.call(pcrc,md,param,md.getOutputType().toProto(),User.getCurrent(),address,new MetricsConnection.CallStats()); int index=0; while (r.getSecond().advance()) { assertTrue(CELL.equals(r.getSecond().current())); index++; } assertEquals(count,index); } finally { client.close(); rpcServer.stop(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Verifies {@link AbstractRpcClient#wrapException}: connection-level exceptions
 * (connect failure, socket timeout, connection closing) are passed through with their
 * original type, while a {@link CallTimeoutException} is wrapped with the original
 * exception preserved as the cause.
 */
@Test public void testWrapException() throws Exception {
  AbstractRpcClient client = (AbstractRpcClient) RpcClientFactory.createClient(CONF, "AbstractTestIPC");
  try {
    final InetSocketAddress address = InetSocketAddress.createUnresolved("localhost", 0);
    // Connection failures keep their own type so callers can still distinguish them.
    assertTrue(client.wrapException(address, new ConnectException()) instanceof ConnectException);
    assertTrue(client.wrapException(address, new SocketTimeoutException()) instanceof SocketTimeoutException);
    assertTrue(client.wrapException(address, new ConnectionClosingException("Test AbstractRpcClient#wrapException")) instanceof ConnectionClosingException);
    // Call timeouts are wrapped; the original exception must remain reachable via getCause().
    assertTrue(client.wrapException(address, new CallTimeoutException("Test AbstractRpcClient#wrapException")).getCause() instanceof CallTimeoutException);
  } finally {
    // The original test leaked this client; release its connections/threads.
    client.close();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Starts a real RpcServer and sends an echo request whose handler (TestRpcServer1,
// defined elsewhere) replies with the caller's remote address; asserting it equals the
// client's local address proves the Call object carried a non-null remoteAddress.
/** * Tests that the RpcServer creates & dispatches CallRunner object to scheduler with non-null * remoteAddress set to its Call Object * @throws ServiceException */ @Test public void testRpcServerForNotNullRemoteAddressInCallObject() throws IOException, ServiceException { final RpcScheduler scheduler=new FifoRpcScheduler(CONF,1); final TestRpcServer1 rpcServer=new TestRpcServer1(scheduler); final InetSocketAddress localAddr=new InetSocketAddress("localhost",0); final AbstractRpcClient client=new RpcClientImpl(CONF,HConstants.CLUSTER_ID_DEFAULT,localAddr,null); try { rpcServer.start(); final InetSocketAddress isa=rpcServer.getListenerAddress(); if (isa == null) { throw new IOException("Listener channel is closed"); } final BlockingRpcChannel channel=client.createBlockingRpcChannel(ServerName.valueOf(isa.getHostName(),isa.getPort(),System.currentTimeMillis()),User.getCurrent(),0); TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface stub=TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(channel); final EchoRequestProto echoRequest=EchoRequestProto.newBuilder().setMessage("GetRemoteAddress").build(); final EchoResponseProto echoResponse=stub.echo(null,echoRequest); Assert.assertEquals(localAddr.getAddress().getHostAddress(),echoResponse.getMessage()); } finally { client.close(); rpcServer.stop(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Runs an echo call through a client built without a cell codec (createRpcClientNoCodec
// is defined elsewhere) and checks that no cell scanner comes back (getSecond() == null)
// while the message still round-trips inside the protobuf response.
/** * Ensure we do not HAVE TO HAVE a codec. * @throws InterruptedException * @throws IOException */ @Test public void testNoCodec() throws InterruptedException, IOException { Configuration conf=HBaseConfiguration.create(); AbstractRpcClient client=createRpcClientNoCodec(conf); TestRpcServer rpcServer=new TestRpcServer(); try { rpcServer.start(); MethodDescriptor md=SERVICE.getDescriptorForType().findMethodByName("echo"); final String message="hello"; EchoRequestProto param=EchoRequestProto.newBuilder().setMessage(message).build(); InetSocketAddress address=rpcServer.getListenerAddress(); if (address == null) { throw new IOException("Listener channel is closed"); } Pair r=client.call(null,md,param,md.getOutputType().toProto(),User.getCurrent(),address,new MetricsConnection.CallStats()); assertTrue(r.getSecond() == null); assertTrue(r.getFirst().toString().contains(message)); } finally { client.close(); rpcServer.stop(); } }

Class: org.apache.hadoop.hbase.ipc.TestGlobalEventLoopGroup

InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier HybridVerifier 
// Verifies event-loop-group sharing: with USE_GLOBAL_EVENT_LOOP_GROUP=true two clients
// share one netty group, and closing one client does not shut the shared group down;
// with the flag off, a new client gets its own distinct group. Close order at the end
// (client2 then client) is part of what is being exercised — do not reorder.
@Test public void test(){ Configuration conf=HBaseConfiguration.create(); conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP,true); AsyncRpcClient client=new AsyncRpcClient(conf); assertNotNull(AsyncRpcClient.GLOBAL_EVENT_LOOP_GROUP); AsyncRpcClient client1=new AsyncRpcClient(conf); assertSame(client.bootstrap.group(),client1.bootstrap.group()); client1.close(); assertFalse(client.bootstrap.group().isShuttingDown()); conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP,false); AsyncRpcClient client2=new AsyncRpcClient(conf); assertNotSame(client.bootstrap.group(),client2.bootstrap.group()); client2.close(); client.close(); }

Class: org.apache.hadoop.hbase.ipc.TestHBaseClient

InternalCallVerifier BooleanVerifier 
/**
 * Verifies FailedServers expiry semantics under a manually-controlled clock:
 * a server marked failed matches any equal address (ia/ia2 are distinct objects for the
 * same host:port), stays failed within FAILED_SERVER_EXPIRY_DEFAULT, expires after it,
 * and re-adding one address does not resurrect a different one (ia3 vs ia4 differ by port).
 */
@Test public void testFailedServer(){
  ManualEnvironmentEdge ee = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(ee);
  try {
    FailedServers fs = new FailedServers(new Configuration());
    InetSocketAddress ia = InetSocketAddress.createUnresolved("bad", 12);
    InetSocketAddress ia2 = InetSocketAddress.createUnresolved("bad", 12);
    InetSocketAddress ia3 = InetSocketAddress.createUnresolved("badtoo", 12);
    InetSocketAddress ia4 = InetSocketAddress.createUnresolved("badtoo", 13);
    Assert.assertFalse(fs.isFailedServer(ia));
    fs.addToFailedServers(ia);
    // Equal-but-distinct address objects must both be reported as failed.
    Assert.assertTrue(fs.isFailedServer(ia));
    Assert.assertTrue(fs.isFailedServer(ia2));
    ee.incValue(1);
    Assert.assertTrue(fs.isFailedServer(ia));
    Assert.assertTrue(fs.isFailedServer(ia2));
    // Advancing past the expiry window clears the failed state.
    ee.incValue(RpcClient.FAILED_SERVER_EXPIRY_DEFAULT + 1);
    Assert.assertFalse(fs.isFailedServer(ia));
    Assert.assertFalse(fs.isFailedServer(ia2));
    fs.addToFailedServers(ia);
    fs.addToFailedServers(ia3);
    fs.addToFailedServers(ia4);
    Assert.assertTrue(fs.isFailedServer(ia));
    Assert.assertTrue(fs.isFailedServer(ia2));
    Assert.assertTrue(fs.isFailedServer(ia3));
    Assert.assertTrue(fs.isFailedServer(ia4));
    ee.incValue(RpcClient.FAILED_SERVER_EXPIRY_DEFAULT + 1);
    Assert.assertFalse(fs.isFailedServer(ia));
    Assert.assertFalse(fs.isFailedServer(ia2));
    Assert.assertFalse(fs.isFailedServer(ia3));
    Assert.assertFalse(fs.isFailedServer(ia4));
    // Re-adding "badtoo":12 must not mark "badtoo":13 as failed.
    fs.addToFailedServers(ia3);
    Assert.assertFalse(fs.isFailedServer(ia4));
  } finally {
    // The original test leaked the injected manual edge into subsequent tests;
    // restore the default environment edge.
    EnvironmentEdgeManager.reset();
  }
}

Class: org.apache.hadoop.hbase.ipc.TestPayloadCarryingRpcController

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Builds a controller from a list of cells and verifies its CellScanner yields every
 * cell in order, each carrying the byte-encoded form of its index as value.
 */
@Test public void testListOfCellScannerables() throws IOException {
  final int count = 10;
  List cells = new ArrayList();
  for (int i = 0; i < count; i++) {
    cells.add(createCell(i));
  }
  PayloadCarryingRpcController controller = new PayloadCarryingRpcController(cells);
  CellScanner scanner = controller.cellScanner();
  int seen = 0;
  while (scanner.advance()) {
    Cell cell = scanner.current();
    byte[] expected = Bytes.toBytes(seen);
    assertTrue("" + seen, Bytes.equals(expected, 0, expected.length,
        cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
    seen++;
  }
  // Scanner must have produced exactly as many cells as were supplied.
  assertEquals(count, seen);
}

Class: org.apache.hadoop.hbase.ipc.TestRpcMetrics

InternalCallVerifier EqualityVerifier 
/**
 * Verifies that the MetricsHBaseServer factory wires the correct metrics context,
 * JMX context and name for both the master and region-server variants.
 */
@Test public void testFactory(){
  MetricsHBaseServer master = new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub());
  MetricsHBaseServerSource masterSrc = master.getMetricsSource();
  MetricsHBaseServer regionServer = new MetricsHBaseServer("HRegionServer", new MetricsHBaseServerWrapperStub());
  MetricsHBaseServerSource rsSrc = regionServer.getMetricsSource();
  // Context strings are lowercased component names.
  assertEquals("master", masterSrc.getMetricsContext());
  assertEquals("regionserver", rsSrc.getMetricsContext());
  // JMX contexts carry the IPC sub-component.
  assertEquals("Master,sub=IPC", masterSrc.getMetricsJmxContext());
  assertEquals("RegionServer,sub=IPC", rsSrc.getMetricsJmxContext());
  // Metrics names keep the component casing.
  assertEquals("Master", masterSrc.getMetricsName());
  assertEquals("RegionServer", rsSrc.getMetricsName());
}

Class: org.apache.hadoop.hbase.mapred.TestGroupingTableMap

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Configures GroupingTableMap with two grouped columns and verifies both paths that build
// the composite key: map() must collect a key equal to the two cell values joined by a
// single space (checked inside the anonymous OutputCollector and confirmed via the
// outputCollected flag), and createGroupKey() must join raw value arrays the same way.
// The mocked Result supplies the cells; close() is guaranteed by the finally block.
@Test @SuppressWarnings({"deprecation"}) public void shouldCreateNewKey() throws Exception { GroupingTableMap gTableMap=null; try { Result result=mock(Result.class); Reporter reporter=mock(Reporter.class); final byte[] bSeparator=Bytes.toBytes(" "); gTableMap=new GroupingTableMap(); Configuration cfg=new Configuration(); cfg.set(GroupingTableMap.GROUP_COLUMNS,"familyA:qualifierA familyB:qualifierB"); JobConf jobConf=new JobConf(cfg); gTableMap.configure(jobConf); final byte[] firstPartKeyValue=Bytes.toBytes("34879512738945"); final byte[] secondPartKeyValue=Bytes.toBytes("35245142671437"); byte[] row={}; List cells=ImmutableList.of(new KeyValue(row,"familyA".getBytes(),"qualifierA".getBytes(),firstPartKeyValue),new KeyValue(row,"familyB".getBytes(),"qualifierB".getBytes(),secondPartKeyValue)); when(result.listCells()).thenReturn(cells); final AtomicBoolean outputCollected=new AtomicBoolean(); OutputCollector outputCollector=new OutputCollector(){ @Override public void collect( ImmutableBytesWritable arg, Result result) throws IOException { assertArrayEquals(com.google.common.primitives.Bytes.concat(firstPartKeyValue,bSeparator,secondPartKeyValue),arg.copyBytes()); outputCollected.set(true); } } ; gTableMap.map(null,result,outputCollector,reporter); verify(result).listCells(); Assert.assertTrue("Output not received",outputCollected.get()); final byte[] firstPartValue=Bytes.toBytes("238947928"); final byte[] secondPartValue=Bytes.toBytes("4678456942345"); byte[][] data={firstPartValue,secondPartValue}; ImmutableBytesWritable byteWritable=gTableMap.createGroupKey(data); assertArrayEquals(com.google.common.primitives.Bytes.concat(firstPartValue,bSeparator,secondPartValue),byteWritable.get()); } finally { if (gTableMap != null) gTableMap.close(); } }

Class: org.apache.hadoop.hbase.mapred.TestRowCounter

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Runs RowCounter with only two arguments and verifies it returns -1 and prints the
// expected "Wrong number of parameters" error. OutputReader (a helper defined elsewhere
// in this class) captures System.err while doRead() executes the tool.
@Test @SuppressWarnings("deprecation") public void shouldExitAndPrintUsageSinceParameterNumberLessThanThree() throws Exception { final String[] args=new String[]{"one","two"}; String line="ERROR: Wrong number of parameters: " + args.length; String result=new OutputReader(System.err){ @Override void doRead() throws Exception { assertEquals(-1,new RowCounter().run(args)); } } .read(); assertTrue(result.startsWith(line)); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that RowCounter.createSubmittableJob builds a JobConf with the expected
 * shape: map-only ("rowcounter") job over the requested columns, with
 * ImmutableBytesWritable/Result map output types and RowCounterMapper as the mapper.
 */
@Test @SuppressWarnings({"deprecation"}) public void shouldCreateAndRunSubmittableJob() throws Exception {
  RowCounter rowCounter = new RowCounter();
  rowCounter.setConf(HBaseConfiguration.create());
  String[] args = new String[]{"\temp", "tableA", "column1", "column2", "column3"};
  JobConf jobConfig = rowCounter.createSubmittableJob(args);
  assertNotNull(jobConfig);
  // Row counting is map-only: no reducers, fixed job name.
  assertEquals(0, jobConfig.getNumReduceTasks());
  assertEquals("rowcounter", jobConfig.getJobName());
  // Mapper wiring and key/value types.
  assertEquals(ImmutableBytesWritable.class, jobConfig.getMapOutputKeyClass());
  assertEquals(Result.class, jobConfig.getMapOutputValueClass());
  assertEquals(RowCounterMapper.class, jobConfig.getMapperClass());
  // The requested columns are passed through as a space-joined list.
  assertEquals(Joiner.on(' ').join("column1", "column2", "column3"),
      jobConfig.get(TableInputFormat.COLUMN_LIST));
}

Class: org.apache.hadoop.hbase.mapred.TestSplitTable

InternalCallVerifier EqualityVerifier 
/**
 * Verifies TableSplit equals/hashCode: changing any one component (table, start row,
 * end row, location) produces an unequal split with a different hash, while a split
 * built from identical components is equal with an identical hash.
 */
@Test @SuppressWarnings("deprecation") public void testSplitTableEquals(){
  byte[] table = Bytes.toBytes("tableA");
  byte[] start = Bytes.toBytes("aaa");
  byte[] end = Bytes.toBytes("ddd");
  String location = "locationA";
  TableSplit reference = new TableSplit(table, start, end, location);
  // Differ by table name.
  TableSplit otherTable = new TableSplit(Bytes.toBytes("tableB"), start, end, location);
  assertNotEquals(reference.hashCode(), otherTable.hashCode());
  assertNotEquals(reference, otherTable);
  // Differ by start row.
  TableSplit otherStart = new TableSplit(table, Bytes.toBytes("bbb"), end, location);
  assertNotEquals(reference.hashCode(), otherStart.hashCode());
  assertNotEquals(reference, otherStart);
  // Differ by end row.
  TableSplit otherEnd = new TableSplit(table, start, Bytes.toBytes("eee"), location);
  assertNotEquals(reference.hashCode(), otherEnd.hashCode());
  assertNotEquals(reference, otherEnd);
  // Differ by location.
  TableSplit otherLocation = new TableSplit(table, start, end, "locationB");
  assertNotEquals(reference.hashCode(), otherLocation.hashCode());
  assertNotEquals(reference, otherLocation);
  // Identical components: equal and same hash.
  TableSplit identical = new TableSplit(table, start, end, location);
  assertEquals(reference.hashCode(), identical.hashCode());
  assertEquals(reference, identical);
}

InternalCallVerifier BooleanVerifier 
/**
 * Verifies TableSplit.compareTo ordering by start row: each split compares equal to
 * itself, and the three splits order strictly by their start keys (aaa < iii < lll),
 * checked in both directions for antisymmetry.
 */
@Test @SuppressWarnings("deprecation") public void testSplitTableCompareTo(){
  TableSplit first = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("aaa"), Bytes.toBytes("ddd"), "locationA");
  TableSplit second = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("iii"), Bytes.toBytes("kkk"), "locationA");
  TableSplit third = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("lll"), Bytes.toBytes("zzz"), "locationA");
  // Reflexive: every split is equal to itself.
  assertTrue(first.compareTo(first) == 0);
  assertTrue(second.compareTo(second) == 0);
  assertTrue(third.compareTo(third) == 0);
  // Strict ordering by start row, checked both ways.
  assertTrue(first.compareTo(second) < 0);
  assertTrue(second.compareTo(first) > 0);
  assertTrue(first.compareTo(third) < 0);
  assertTrue(third.compareTo(first) > 0);
  assertTrue(second.compareTo(third) < 0);
  assertTrue(third.compareTo(second) > 0);
  assertTrue(third.compareTo(first) > 0);
}

Class: org.apache.hadoop.hbase.mapred.TestTableMapReduceUtil

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// End-to-end mapred job over the shared test table: wires ClassificatorMapper and
// ClassificatorRowReduce via TableMapReduceUtil, runs the job synchronously with
// JobClient.runJob, and asserts success. The finally block removes hadoop.tmp.dir
// leftovers regardless of outcome.
@Test @SuppressWarnings("deprecation") public void shoudBeValidMapReduceEvaluation() throws Exception { Configuration cfg=UTIL.getConfiguration(); JobConf jobConf=new JobConf(cfg); try { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(1); TableMapReduceUtil.initTableMapJob(TABLE_NAME,new String(COLUMN_FAMILY),ClassificatorMapper.class,ImmutableBytesWritable.class,Put.class,jobConf); TableMapReduceUtil.initTableReduceJob(TABLE_NAME,ClassificatorRowReduce.class,jobConf); RunningJob job=JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Checks that limitNumReduceTasks caps the reducer count at the table's region count
// (1 here), both for the initial setting and after bumping reducers to 10. Also verifies
// setScannerCaching lands in hbase.client.scanner.caching.
// NOTE(review): the second phase calls setNumMapTasks before limitNumReduceTasks —
// possibly intended to be setNumReduceTasks; confirm against TableMapReduceUtil usage.
/** * Check what the given number of reduce tasks for the given job configuration * does not exceed the number of regions for the given table. */ @Test public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Assert.assertNotNull(presidentsTable); Configuration cfg=UTIL.getConfiguration(); JobConf jobConf=new JobConf(cfg); TableMapReduceUtil.setNumReduceTasks(TABLE_NAME,jobConf); TableMapReduceUtil.limitNumReduceTasks(TABLE_NAME,jobConf); TableMapReduceUtil.setScannerCaching(jobConf,100); assertEquals(1,jobConf.getNumReduceTasks()); assertEquals(100,jobConf.getInt("hbase.client.scanner.caching",0)); jobConf.setNumReduceTasks(10); TableMapReduceUtil.setNumMapTasks(TABLE_NAME,jobConf); TableMapReduceUtil.limitNumReduceTasks(TABLE_NAME,jobConf); assertEquals(1,jobConf.getNumReduceTasks()); }

InternalCallVerifier EqualityVerifier 
// Checks that limitNumMapTasks caps the mapper count at the table's region count (1),
// both initially and after bumping mappers to 10.
// NOTE(review): the first call is setNumReduceTasks rather than setNumMapTasks —
// possibly a copy-paste slip in the original test; confirm intent before changing.
@Test public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Configuration cfg=UTIL.getConfiguration(); JobConf jobConf=new JobConf(cfg); TableMapReduceUtil.setNumReduceTasks(TABLE_NAME,jobConf); TableMapReduceUtil.limitNumMapTasks(TABLE_NAME,jobConf); assertEquals(1,jobConf.getNumMapTasks()); jobConf.setNumMapTasks(10); TableMapReduceUtil.setNumMapTasks(TABLE_NAME,jobConf); TableMapReduceUtil.limitNumMapTasks(TABLE_NAME,jobConf); assertEquals(1,jobConf.getNumMapTasks()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Same end-to-end mapred run as shoudBeValidMapReduceEvaluation but with two reducers
// and an explicit HRegionPartitioner, verifying the partitioner path also completes
// successfully. Cleans up hadoop.tmp.dir in the finally block.
@Test @SuppressWarnings("deprecation") public void shoudBeValidMapReduceWithPartitionerEvaluation() throws IOException { Configuration cfg=UTIL.getConfiguration(); JobConf jobConf=new JobConf(cfg); try { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(2); TableMapReduceUtil.initTableMapJob(TABLE_NAME,new String(COLUMN_FAMILY),ClassificatorMapper.class,ImmutableBytesWritable.class,Put.class,jobConf); TableMapReduceUtil.initTableReduceJob(TABLE_NAME,ClassificatorRowReduce.class,jobConf,HRegionPartitioner.class); RunningJob job=JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } }

Class: org.apache.hadoop.hbase.mapred.TestTableSnapshotInputFormat

InternalCallVerifier EqualityVerifier 
// Creates a table + snapshot, configures a snapshot-based map job, and asserts the job
// config keeps the default LruBlockCache size and does not enable BucketCache (size 0),
// so snapshot scans do not inherit server-side cache settings. Cleanup (snapshot, table,
// cluster) happens in the finally block.
@Test public void testInitTableSnapshotMapperJobConfig() throws Exception { setupCluster(); TableName tableName=TableName.valueOf("testInitTableSnapshotMapperJobConfig"); String snapshotName="foo"; try { createTableAndSnapshot(UTIL,tableName,snapshotName,getStartRow(),getEndRow(),1); JobConf job=new JobConf(UTIL.getConfiguration()); Path tmpTableDir=UTIL.getDataTestDirOnTestFS(snapshotName); TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,COLUMNS,TestTableSnapshotMapper.class,ImmutableBytesWritable.class,NullWritable.class,job,false,tmpTableDir); Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.",HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT,job.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,-1),0.01); Assert.assertEquals("Snapshot job should not use BucketCache.",0,job.getFloat("hbase.bucketcache.size",-1),0.01); } finally { UTIL.getHBaseAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); tearDownCluster(); } }

Class: org.apache.hadoop.hbase.mapreduce.IntegrationTestTableMapReduceUtil

InternalCallVerifier BooleanVerifier 
/**
 * Look for jars we expect to be on the classpath by name: after
 * TableMapReduceUtil.addDependencyJars, the job's "tmpjars" property must mention
 * every HBase module and third-party dependency the jobs need at runtime.
 */
@Test public void testAddDependencyJars() throws Exception {
  Job job = new Job();
  TableMapReduceUtil.addDependencyJars(job);
  String tmpjars = job.getConfiguration().get("tmpjars");
  // Same set of substring checks as before, driven from a single table of names.
  String[] expectedJars = {
      "hbase-common", "hbase-protocol", "hbase-client", "hbase-hadoop-compat",
      "hbase-server", "zookeeper", "netty", "protobuf", "guava", "htrace"
  };
  for (String jarName : expectedJars) {
    assertTrue(tmpjars.contains(jarName));
  }
}

Class: org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatTestBase

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier 
// After restoring a snapshot (via the subclass hook
// testRestoreSnapshotDoesNotCreateBackRefLinksInit), walks every region/family/store
// file of the table and asserts no HFileLink back-reference directory exists, in either
// the live store dir or its archive location. Cleanup runs in the finally block.
@Test public void testRestoreSnapshotDoesNotCreateBackRefLinks() throws Exception { setupCluster(); TableName tableName=TableName.valueOf("testRestoreSnapshotDoesNotCreateBackRefLinks"); String snapshotName="foo"; try { createTableAndSnapshot(UTIL,tableName,snapshotName,getStartRow(),getEndRow(),1); Path tmpTableDir=UTIL.getDataTestDirOnTestFS(snapshotName); testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName,snapshotName,tmpTableDir); Path rootDir=FSUtils.getRootDir(UTIL.getConfiguration()); for ( Path regionDir : FSUtils.getRegionDirs(fs,FSUtils.getTableDir(rootDir,tableName))) { for ( Path storeDir : FSUtils.getFamilyDirs(fs,regionDir)) { for ( FileStatus status : fs.listStatus(storeDir)) { System.out.println(status.getPath()); if (StoreFileInfo.isValid(status)) { Path archiveStoreDir=HFileArchiveUtil.getStoreArchivePath(UTIL.getConfiguration(),tableName,regionDir.getName(),storeDir.getName()); Path path=HFileLink.getBackReferencesDir(storeDir,status.getPath().getName()); assertFalse("There is a back reference in " + path,fs.exists(path)); path=HFileLink.getBackReferencesDir(archiveStoreDir,status.getPath().getName()); assertFalse("There is a back reference in " + path,fs.exists(path)); } } } } } finally { UTIL.getHBaseAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); tearDownCluster(); } }

Class: org.apache.hadoop.hbase.mapreduce.TestCopyTable

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Copies only the half-open row range [row1, row2) between two tables using CopyTable's
// --startrow/--stoprow flags, then verifies row1 arrived in the target while row0
// (before the range) and row2 (the exclusive stop row) did not.
@Test public void testStartStopRow() throws Exception { final TableName TABLENAME1=TableName.valueOf("testStartStopRow1"); final TableName TABLENAME2=TableName.valueOf("testStartStopRow2"); final byte[] FAMILY=Bytes.toBytes("family"); final byte[] COLUMN1=Bytes.toBytes("c1"); final byte[] ROW0=Bytes.toBytes("row0"); final byte[] ROW1=Bytes.toBytes("row1"); final byte[] ROW2=Bytes.toBytes("row2"); Table t1=TEST_UTIL.createTable(TABLENAME1,FAMILY); Table t2=TEST_UTIL.createTable(TABLENAME2,FAMILY); Put p=new Put(ROW0); p.addColumn(FAMILY,COLUMN1,COLUMN1); t1.put(p); p=new Put(ROW1); p.addColumn(FAMILY,COLUMN1,COLUMN1); t1.put(p); p=new Put(ROW2); p.addColumn(FAMILY,COLUMN1,COLUMN1); t1.put(p); CopyTable copy=new CopyTable(); assertEquals(0,ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()),copy,new String[]{"--new.name=" + TABLENAME2,"--startrow=row1","--stoprow=row2",TABLENAME1.getNameAsString()})); Get g=new Get(ROW1); Result r=t2.get(g); assertEquals(1,r.size()); assertTrue(CellUtil.matchingQualifier(r.rawCells()[0],COLUMN1)); g=new Get(ROW0); r=t2.get(g); assertEquals(0,r.size()); g=new Get(ROW2); r=t2.get(g); assertEquals(0,r.size()); t1.close(); t2.close(); TEST_UTIL.deleteTable(TABLENAME1); TEST_UTIL.deleteTable(TABLENAME2); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Runs CopyTable with --families=a:b to copy family A's cells into family B of the
// target table. Verifies ROW1's latest family-A value ("Data13") appears under family B
// in the target, and that ROW2's family-A data was not copied into family A there.
// The time window (--starttime/--endtime) brackets "now" to include all fresh writes.
/** * Test copy of table from sourceTable to targetTable all rows from family a */ @Test public void testRenameFamily() throws Exception { TableName sourceTable=TableName.valueOf("sourceTable"); TableName targetTable=TableName.valueOf("targetTable"); byte[][] families={FAMILY_A,FAMILY_B}; Table t=TEST_UTIL.createTable(sourceTable,families); Table t2=TEST_UTIL.createTable(targetTable,families); Put p=new Put(ROW1); p.addColumn(FAMILY_A,QUALIFIER,Bytes.toBytes("Data11")); p.addColumn(FAMILY_B,QUALIFIER,Bytes.toBytes("Data12")); p.addColumn(FAMILY_A,QUALIFIER,Bytes.toBytes("Data13")); t.put(p); p=new Put(ROW2); p.addColumn(FAMILY_B,QUALIFIER,Bytes.toBytes("Dat21")); p.addColumn(FAMILY_A,QUALIFIER,Bytes.toBytes("Data22")); p.addColumn(FAMILY_B,QUALIFIER,Bytes.toBytes("Data23")); t.put(p); long currentTime=System.currentTimeMillis(); String[] args=new String[]{"--new.name=" + targetTable,"--families=a:b","--all.cells","--starttime=" + (currentTime - 100000),"--endtime=" + (currentTime + 100000),"--versions=1",sourceTable.getNameAsString()}; assertNull(t2.get(new Get(ROW1)).getRow()); assertTrue(runCopy(args)); assertNotNull(t2.get(new Get(ROW1)).getRow()); Result res=t2.get(new Get(ROW1)); byte[] b1=res.getValue(FAMILY_B,QUALIFIER); assertEquals("Data13",new String(b1)); assertNotNull(t2.get(new Get(ROW2)).getRow()); res=t2.get(new Get(ROW2)); b1=res.getValue(FAMILY_A,QUALIFIER); assertNull(b1); }

Class: org.apache.hadoop.hbase.mapreduce.TestHFileOutputFormat2

InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier IgnoredMethod 
// Writes a KeyValue carrying LATEST_TIMESTAMP through the HFileOutputFormat2 writer and
// checks the writer mutated its timestamp in place (row/family/qualifier unchanged,
// timestamp no longer LATEST_TIMESTAMP); a second KeyValue with an explicit timestamp
// must pass through untouched. Writer close and test-dir cleanup run in finally.
/** * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if * passed a keyvalue whose timestamp is {@link HConstants#LATEST_TIMESTAMP}. * @see HBASE-2615 */ @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void test_LATEST_TIMESTAMP_isReplaced() throws Exception { Configuration conf=new Configuration(this.util.getConfiguration()); RecordWriter writer=null; TaskAttemptContext context=null; Path dir=util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); try { Job job=new Job(conf); FileOutputFormat.setOutputPath(job,dir); context=createTestTaskAttemptContext(job); HFileOutputFormat2 hof=new HFileOutputFormat2(); writer=hof.getRecordWriter(context); final byte[] b=Bytes.toBytes("b"); KeyValue kv=new KeyValue(b,b,b); KeyValue original=kv.clone(); writer.write(new ImmutableBytesWritable(),kv); assertFalse(original.equals(kv)); assertTrue(Bytes.equals(CellUtil.cloneRow(original),CellUtil.cloneRow(kv))); assertTrue(Bytes.equals(CellUtil.cloneFamily(original),CellUtil.cloneFamily(kv))); assertTrue(Bytes.equals(CellUtil.cloneQualifier(original),CellUtil.cloneQualifier(kv))); assertNotSame(original.getTimestamp(),kv.getTimestamp()); assertNotSame(HConstants.LATEST_TIMESTAMP,kv.getTimestamp()); kv=new KeyValue(b,b,b,kv.getTimestamp() - 1,b); original=kv.clone(); writer.write(new ImmutableBytesWritable(),kv); assertTrue(original.equals(kv)); } finally { if (writer != null && context != null) writer.close(context); dir.getFileSystem(conf).delete(dir,true); } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier IgnoredMethod 
// Writes two KeyValues (timestamps 2000 and 1000) through the HFileOutputFormat2 writer,
// then reopens the produced HFile and checks its TIMERANGE file-info entry deserializes
// to a tracker spanning exactly [1000, 2000]. Also asserts explicit timestamps are not
// rewritten. Writer close and output-dir cleanup run in finally.
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void test_TIMERANGE() throws Exception { Configuration conf=new Configuration(this.util.getConfiguration()); RecordWriter writer=null; TaskAttemptContext context=null; Path dir=util.getDataTestDir("test_TIMERANGE_present"); LOG.info("Timerange dir writing to dir: " + dir); try { Job job=new Job(conf); FileOutputFormat.setOutputPath(job,dir); context=createTestTaskAttemptContext(job); HFileOutputFormat2 hof=new HFileOutputFormat2(); writer=hof.getRecordWriter(context); final byte[] b=Bytes.toBytes("b"); KeyValue kv=new KeyValue(b,b,b,2000,b); KeyValue original=kv.clone(); writer.write(new ImmutableBytesWritable(),kv); assertEquals(original,kv); kv=new KeyValue(b,b,b,1000,b); original=kv.clone(); writer.write(new ImmutableBytesWritable(),kv); assertEquals(original,kv); writer.close(context); FileSystem fs=FileSystem.get(conf); Path attemptDirectory=hof.getDefaultWorkFile(context,"").getParent(); FileStatus[] sub1=fs.listStatus(attemptDirectory); FileStatus[] file=fs.listStatus(sub1[0].getPath()); HFile.Reader rd=HFile.createReader(fs,file[0].getPath(),new CacheConfig(conf),conf); Map finfo=rd.loadFileInfo(); byte[] range=finfo.get("TIMERANGE".getBytes()); assertNotNull(range); TimeRangeTracker timeRangeTracker=new TimeRangeTracker(); Writables.copyWritable(range,timeRangeTracker); LOG.info(timeRangeTracker.getMinimumTimestamp() + "...." + timeRangeTracker.getMaximumTimestamp()); assertEquals(1000,timeRangeTracker.getMinimumTimestamp()); assertEquals(2000,timeRangeTracker.getMaximumTimestamp()); rd.close(); } finally { if (writer != null && context != null) writer.close(context); dir.getFileSystem(conf).delete(dir,true); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier IgnoredMethod 
// Configures a mocked table with generated column descriptors, writes random cells
// through HFileOutputFormat2, commits the task/job, then reopens each family's HFile
// and asserts its bloom-filter type and compression match the family descriptor
// (missing bloom info defaults to "NONE"). Output dir is removed in finally.
/** * Test that {@link HFileOutputFormat2} RecordWriter uses compression and * bloom filter settings from the column family descriptor */ @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void testColumnFamilySettings() throws Exception { Configuration conf=new Configuration(this.util.getConfiguration()); RecordWriter writer=null; TaskAttemptContext context=null; Path dir=util.getDataTestDir("testColumnFamilySettings"); Table table=Mockito.mock(Table.class); RegionLocator regionLocator=Mockito.mock(RegionLocator.class); HTableDescriptor htd=new HTableDescriptor(TABLE_NAME); Mockito.doReturn(htd).when(table).getTableDescriptor(); for ( HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors()) { htd.addFamily(hcd); } setupMockStartKeys(regionLocator); try { conf.set("io.seqfile.compression.type","NONE"); conf.set("hbase.fs.tmp.dir",dir.toString()); conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY,false); Job job=new Job(conf,"testLocalMRIncrementalLoad"); job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings")); setupRandomGeneratorMapper(job); HFileOutputFormat2.configureIncrementalLoad(job,table.getTableDescriptor(),regionLocator); FileOutputFormat.setOutputPath(job,dir); context=createTestTaskAttemptContext(job); HFileOutputFormat2 hof=new HFileOutputFormat2(); writer=hof.getRecordWriter(context); writeRandomKeyValues(writer,context,htd.getFamiliesKeys(),ROWSPERSPLIT); writer.close(context); FileSystem fs=dir.getFileSystem(conf); hof.getOutputCommitter(context).commitTask(context); hof.getOutputCommitter(context).commitJob(context); FileStatus[] families=FSUtils.listStatus(fs,dir,new FSUtils.FamilyDirFilter(fs)); assertEquals(htd.getFamilies().size(),families.length); for ( FileStatus f : families) { String familyStr=f.getPath().getName(); HColumnDescriptor hcd=htd.getFamily(Bytes.toBytes(familyStr)); Path dataFilePath=fs.listStatus(f.getPath())[0].getPath(); Reader 
reader=HFile.createReader(fs,dataFilePath,new CacheConfig(conf),conf); Map fileInfo=reader.loadFileInfo(); byte[] bloomFilter=fileInfo.get(StoreFile.BLOOM_FILTER_TYPE_KEY); if (bloomFilter == null) bloomFilter=Bytes.toBytes("NONE"); assertEquals("Incorrect bloom filter used for column family " + familyStr + "(reader: "+ reader+ ")",hcd.getBloomFilterType(),BloomType.valueOf(Bytes.toString(bloomFilter))); assertEquals("Incorrect compression used for column family " + familyStr + "(reader: "+ reader+ ")",hcd.getCompressionType(),reader.getFileContext().getCompression()); } } finally { dir.getFileSystem(conf).delete(dir,true); } }

InternalCallVerifier BooleanVerifier HybridVerifier IgnoredMethod 
// Runs a small MR job that writes PerformanceEvaluation-style random KVs through
// HFileOutputFormat2 with a SimpleTotalOrderPartitioner spanning the full key space
// (0x00.. to 0xff..) and 4 reducers, then asserts the job succeeded and produced at
// least one output file.
/** * Run small MR job. */ @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void testWritingPEData() throws Exception { Configuration conf=util.getConfiguration(); Path testDir=util.getDataTestDirOnTestFS("testWritingPEData"); FileSystem fs=testDir.getFileSystem(conf); conf.setInt("mapreduce.task.io.sort.mb",20); conf.setLong(HConstants.HREGION_MAX_FILESIZE,64 * 1024); Job job=new Job(conf,"testWritingPEData"); setupRandomGeneratorMapper(job); byte[] startKey=new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; byte[] endKey=new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; Arrays.fill(startKey,(byte)0); Arrays.fill(endKey,(byte)0xff); job.setPartitionerClass(SimpleTotalOrderPartitioner.class); SimpleTotalOrderPartitioner.setStartKey(job.getConfiguration(),startKey); SimpleTotalOrderPartitioner.setEndKey(job.getConfiguration(),endKey); job.setReducerClass(KeyValueSortReducer.class); job.setOutputFormatClass(HFileOutputFormat2.class); job.setNumReduceTasks(4); job.getConfiguration().setStrings("io.serializations",conf.get("io.serializations"),MutationSerialization.class.getName(),ResultSerialization.class.getName(),KeyValueSerialization.class.getName()); FileOutputFormat.setOutputPath(job,testDir); assertTrue(job.waitForCompletion(false)); FileStatus[] files=fs.listStatus(testDir); assertTrue(files.length > 0); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier IgnoredMethod 
// Bulk-loads two rounds of HFiles with compaction.exclude=true, then asserts a minor
// compaction does NOT merge them: quickPoll is expected to time out (the thrown
// IOException inside try is only reached if it succeeded, and is swallowed as the
// AssertionError from quickPoll's failure is the expected path), while a major
// compaction afterwards must reduce the store to a single file. Mini cluster is torn
// down in finally.
/** * This test is to test the scenario happened in HBASE-6901. * All files are bulk loaded and excluded from minor compaction. * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException * will be thrown. */ @Ignore("Flakey: See HBASE-9051") @Test public void testExcludeAllFromMinorCompaction() throws Exception { Configuration conf=util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min",2); generateRandomStartKeys(5); util.startMiniCluster(); try (Connection conn=ConnectionFactory.createConnection();Admin admin=conn.getAdmin();Table table=util.createTable(TABLE_NAME,FAMILIES);RegionLocator locator=conn.getRegionLocator(TABLE_NAME)){ final FileSystem fs=util.getDFSCluster().getFileSystem(); assertEquals("Should start with empty table",0,util.countRows(table)); final Path storePath=new Path(FSUtils.getTableDir(FSUtils.getRootDir(conf),TABLE_NAME),new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),Bytes.toString(FAMILIES[0]))); assertEquals(0,fs.listStatus(storePath).length); conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",true); for (int i=0; i < 2; i++) { Path testDir=util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); runIncrementalPELoad(conf,table.getTableDescriptor(),conn.getRegionLocator(TABLE_NAME),testDir); new LoadIncrementalHFiles(conf).doBulkLoad(testDir,admin,table,locator); } int expectedRows=2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; assertEquals("LoadIncrementalHFiles should put expected data in table",expectedRows,util.countRows(table)); assertEquals(2,fs.listStatus(storePath).length); admin.compact(TABLE_NAME); try { quickPoll(new Callable(){ @Override public Boolean call() throws Exception { List regions=util.getMiniHBaseCluster().getRegions(TABLE_NAME); for ( HRegion region : regions) { for ( Store store : region.getStores()) { store.closeAndArchiveCompactedFiles(); } } return fs.listStatus(storePath).length == 1; } } ,5000); throw new IOException("SF# = " + 
fs.listStatus(storePath).length); } catch ( AssertionError ae) { } admin.majorCompact(TABLE_NAME); quickPoll(new Callable(){ @Override public Boolean call() throws Exception { List regions=util.getMiniHBaseCluster().getRegions(TABLE_NAME); for ( HRegion region : regions) { for ( Store store : region.getStores()) { store.closeAndArchiveCompactedFiles(); } } return fs.listStatus(storePath).length == 1; } } ,5000); } finally { util.shutdownMiniCluster(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier IgnoredMethod 
// Verifies that a bulk-loaded HFile flagged with the compaction-exclude marker is
// skipped by minor compaction but still merged by a major compaction.
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void testExcludeMinorCompaction() throws Exception {
  Configuration conf=util.getConfiguration();
  // Two store files are enough to make a minor compaction eligible.
  conf.setInt("hbase.hstore.compaction.min",2);
  generateRandomStartKeys(5);
  util.startMiniCluster();
  try (Connection conn=ConnectionFactory.createConnection(conf);Admin admin=conn.getAdmin()){
    Path testDir=util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs=util.getDFSCluster().getFileSystem();
    Table table=util.createTable(TABLE_NAME,FAMILIES);
    assertEquals("Should start with empty table",0,util.countRows(table));
    // Store directory of the first region's first family; store-file counts are read from here.
    final Path storePath=new Path(FSUtils.getTableDir(FSUtils.getRootDir(conf),TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),Bytes.toString(FAMILIES[0])));
    assertEquals(0,fs.listStatus(storePath).length);
    // First store file comes from a normal put + flush (not excluded).
    Put p=new Put(Bytes.toBytes("test"));
    p.addColumn(FAMILIES[0],Bytes.toBytes("1"),Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME);
    assertEquals(1,util.countRows(table));
    quickPoll(new Callable(){
      @Override public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    } ,5000);
    // Second store file comes from a bulk load flagged as compaction-excluded.
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",true);
    RegionLocator regionLocator=conn.getRegionLocator(TABLE_NAME);
    runIncrementalPELoad(conf,table.getTableDescriptor(),regionLocator,testDir);
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir,admin,table,regionLocator);
    int expectedRows=NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",expectedRows + 1,util.countRows(table));
    assertEquals(2,fs.listStatus(storePath).length);
    // Minor compaction must leave both files in place ...
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable(){
        @Override public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      } ,5000);
      // quickPoll returning normally means the files WERE compacted — a failure here.
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch ( AssertionError ae) {
      // expected: quickPoll times out because the excluded file was not minor-compacted
    }
    // ... but major compaction ignores the exclusion flag and merges down to one file.
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable(){
      @Override public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    } ,5000);
  } finally {
    util.shutdownMiniCluster();
  }
}

InternalCallVerifier EqualityVerifier HybridVerifier IgnoredMethod 
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void testJobConfiguration() throws Exception { Configuration conf=new Configuration(this.util.getConfiguration()); conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY,util.getDataTestDir("testJobConfiguration").toString()); Job job=new Job(conf); job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration")); Table table=Mockito.mock(Table.class); RegionLocator regionLocator=Mockito.mock(RegionLocator.class); setupMockStartKeys(regionLocator); setupMockTableName(regionLocator); HFileOutputFormat2.configureIncrementalLoad(job,table.getTableDescriptor(),regionLocator); assertEquals(job.getNumReduceTasks(),4); }

Class: org.apache.hadoop.hbase.mapreduce.TestHRegionPartitioner

InternalCallVerifier EqualityVerifier 
/** * Test HRegionPartitioner */ @Test(timeout=300000) public void testHRegionPartitioner() throws Exception { byte[][] families={Bytes.toBytes("familyA"),Bytes.toBytes("familyB")}; UTIL.createTable(TableName.valueOf("out_table"),families,1,Bytes.toBytes("aa"),Bytes.toBytes("cc"),3); HRegionPartitioner partitioner=new HRegionPartitioner(); Configuration configuration=UTIL.getConfiguration(); configuration.set(TableOutputFormat.OUTPUT_TABLE,"out_table"); partitioner.setConf(configuration); ImmutableBytesWritable writable=new ImmutableBytesWritable(Bytes.toBytes("bb")); assertEquals(1,partitioner.getPartition(writable,10L,3)); assertEquals(0,partitioner.getPartition(writable,10L,1)); }

Class: org.apache.hadoop.hbase.mapreduce.TestHashTable

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// End-to-end test for the HashTable tool: writes 100 rows across 10 regions at a
// fixed timestamp, runs HashTable with 3 hash files and a batch size of 300, then
// checks the written manifest (table name, batch size, hash-file/partition counts)
// and every emitted batch hash against precomputed expected values.
@Test public void testHashTable() throws Exception {
  final TableName tableName=TableName.valueOf("testHashTable");
  final byte[] family=Bytes.toBytes("family");
  final byte[] column1=Bytes.toBytes("c1");
  final byte[] column2=Bytes.toBytes("c2");
  final byte[] column3=Bytes.toBytes("c3");
  int numRows=100;
  int numRegions=10;
  int numHashFiles=3;
  // Region split points at every (numRows / numRegions)-th key.
  byte[][] splitRows=new byte[numRegions - 1][];
  for (int i=1; i < numRegions; i++) {
    splitRows[i - 1]=Bytes.toBytes(numRows * i / numRegions);
  }
  // Fixed timestamp so the emitted hashes are deterministic across runs.
  long timestamp=1430764183454L;
  Table t1=TEST_UTIL.createTable(tableName,family,splitRows);
  for (int i=0; i < numRows; i++) {
    Put p=new Put(Bytes.toBytes(i),timestamp);
    p.addColumn(family,column1,column1);
    p.addColumn(family,column2,column2);
    p.addColumn(family,column3,column3);
    t1.put(p);
  }
  t1.close();
  HashTable hashTable=new HashTable(TEST_UTIL.getConfiguration());
  Path testDir=TEST_UTIL.getDataTestDirOnTestFS(tableName.getNameAsString());
  long batchSize=300;
  int code=hashTable.run(new String[]{"--batchsize=" + batchSize,"--numhashfiles=" + numHashFiles,"--scanbatch=2",tableName.getNameAsString(),testDir.toString()});
  assertEquals("test job failed",0,code);
  FileSystem fs=TEST_UTIL.getTestFileSystem();
  // The manifest written by the job must reflect the requested settings.
  HashTable.TableHash tableHash=HashTable.TableHash.read(fs.getConf(),testDir);
  assertEquals(tableName.getNameAsString(),tableHash.tableName);
  assertEquals(batchSize,tableHash.batchSize);
  assertEquals(numHashFiles,tableHash.numHashFiles);
  assertEquals(numHashFiles - 1,tableHash.partitions.size());
  for ( ImmutableBytesWritable bytes : tableHash.partitions) {
    LOG.debug("partition: " + Bytes.toInt(bytes.get()));
  }
  // Expected batch hashes keyed by batch start row (-1 = the empty/first start key).
  ImmutableMap expectedHashes=ImmutableMap.builder()
      .put(-1,new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f")))
      .put(5,new ImmutableBytesWritable(Bytes.fromHex("28d961d9252ce8f8d44a07b38d3e1d96")))
      .put(10,new ImmutableBytesWritable(Bytes.fromHex("f6bbc4a224d8fd929b783a92599eaffa")))
      .put(15,new ImmutableBytesWritable(Bytes.fromHex("522deb5d97f73a414ecc11457be46881")))
      .put(20,new ImmutableBytesWritable(Bytes.fromHex("b026f2611aaa46f7110116d807545352")))
      .put(25,new ImmutableBytesWritable(Bytes.fromHex("39ffc1a3094aa12a2e90ffd9cef2ce93")))
      .put(30,new ImmutableBytesWritable(Bytes.fromHex("f6b4d75727ce9a30ac29e4f08f601666")))
      .put(35,new ImmutableBytesWritable(Bytes.fromHex("422e2d2f1eb79a8f02171a705a42c090")))
      .put(40,new ImmutableBytesWritable(Bytes.fromHex("559ad61c900fffefea0a15abf8a97bc3")))
      .put(45,new ImmutableBytesWritable(Bytes.fromHex("23019084513eca41cee436b2a29611cb")))
      .put(50,new ImmutableBytesWritable(Bytes.fromHex("b40467d222ddb4949b142fe145ee9edc")))
      .put(55,new ImmutableBytesWritable(Bytes.fromHex("372bf89fcd8ca4b7ab3c1add9d07f7e4")))
      .put(60,new ImmutableBytesWritable(Bytes.fromHex("69ae0585e6255de27dce974e332b8f8b")))
      .put(65,new ImmutableBytesWritable(Bytes.fromHex("8029610044297aad0abdbecd485d8e59")))
      .put(70,new ImmutableBytesWritable(Bytes.fromHex("de5f784f7f78987b6e57ecfd81c8646f")))
      .put(75,new ImmutableBytesWritable(Bytes.fromHex("1cd757cc4e1715c8c3b1c24447a1ec56")))
      .put(80,new ImmutableBytesWritable(Bytes.fromHex("f9a53aacfeb6142b08066615e7038095")))
      .put(85,new ImmutableBytesWritable(Bytes.fromHex("89b872b7e639df32d3276b33928c0c91")))
      .put(90,new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38")))
      .put(95,new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56")))
      .build();
  // Collect every key/hash pair from each MapFile the job produced.
  Map actualHashes=new HashMap();
  Path dataDir=new Path(testDir,HashTable.HASH_DATA_DIR);
  for (int i=0; i < numHashFiles; i++) {
    Path hashPath=new Path(dataDir,HashTable.TableHash.getDataFileName(i));
    MapFile.Reader reader=new MapFile.Reader(hashPath,fs.getConf());
    ImmutableBytesWritable key=new ImmutableBytesWritable();
    ImmutableBytesWritable hash=new ImmutableBytesWritable();
    while (reader.next(key,hash)) {
      String keyString=Bytes.toHex(key.get(),key.getOffset(),key.getLength());
      LOG.debug("Key: " + (keyString.isEmpty() ? "-1" : Integer.parseInt(keyString,16)) + " Hash: "+ Bytes.toHex(hash.get(),hash.getOffset(),hash.getLength()));
      // Empty key = the batch starting at the table's first row; represent it as -1.
      int intKey=-1;
      if (key.getLength() > 0) {
        intKey=Bytes.toInt(key.get(),key.getOffset(),key.getLength());
      }
      if (actualHashes.containsKey(intKey)) {
        Assert.fail("duplicate key in data files: " + intKey);
      }
      actualHashes.put(intKey,new ImmutableBytesWritable(hash.copyBytes()));
    }
    reader.close();
  }
  FileStatus[] files=fs.listStatus(testDir);
  for ( FileStatus file : files) {
    LOG.debug("Output file: " + file.getPath());
  }
  files=fs.listStatus(dataDir);
  for ( FileStatus file : files) {
    LOG.debug("Data file: " + file.getPath());
  }
  // Log the full diff before asserting so a mismatch is easy to diagnose.
  if (!expectedHashes.equals(actualHashes)) {
    LOG.error("Diff: " + Maps.difference(expectedHashes,actualHashes));
  }
  Assert.assertEquals(expectedHashes,actualHashes);
  TEST_UTIL.deleteTable(tableName);
  TEST_UTIL.cleanupDataTestDirOnTestFS();
}

Class: org.apache.hadoop.hbase.mapreduce.TestImportExport

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test public void testDurability() throws Exception { String exportTableName="exporttestDurability"; try (Table exportTable=UTIL.createTable(TableName.valueOf(exportTableName),FAMILYA,3)){ Put put=new Put(ROW1); put.addColumn(FAMILYA,QUAL,now,QUAL); put.addColumn(FAMILYA,QUAL,now + 1,QUAL); put.addColumn(FAMILYA,QUAL,now + 2,QUAL); exportTable.put(put); put=new Put(ROW2); put.addColumn(FAMILYA,QUAL,now,QUAL); put.addColumn(FAMILYA,QUAL,now + 1,QUAL); put.addColumn(FAMILYA,QUAL,now + 2,QUAL); exportTable.put(put); String[] args=new String[]{exportTableName,FQ_OUTPUT_DIR,"1000"}; assertTrue(runExport(args)); String importTableName="importTestDurability1"; Table importTable=UTIL.createTable(TableName.valueOf(importTableName),FAMILYA,3); TableWALActionListener walListener=new TableWALActionListener(importTableName); HRegionInfo region=UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer().getOnlineRegions(importTable.getName()).get(0).getRegionInfo(); WAL wal=UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region); wal.registerWALActionsListener(walListener); args=new String[]{"-D" + Import.WAL_DURABILITY + "="+ Durability.SKIP_WAL.name(),importTableName,FQ_OUTPUT_DIR}; assertTrue(runImport(args)); assertTrue(!walListener.isWALVisited()); assertTrue(getCount(importTable,null) == 2); importTableName="importTestDurability2"; importTable=UTIL.createTable(TableName.valueOf(importTableName),FAMILYA,3); region=UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer().getOnlineRegions(importTable.getName()).get(0).getRegionInfo(); wal=UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL(region); walListener=new TableWALActionListener(importTableName); wal.registerWALActionsListener(walListener); args=new String[]{importTableName,FQ_OUTPUT_DIR}; assertTrue(runImport(args)); assertTrue(walListener.isWALVisited()); assertTrue(getCount(importTable,null) == 2); } }

InternalCallVerifier EqualityVerifier 
/** * Test addFilterAndArguments method of Import This method set couple * parameters into Configuration */ @Test public void testAddFilterAndArguments() throws IOException { Configuration configuration=new Configuration(); List args=new ArrayList(); args.add("param1"); args.add("param2"); Import.addFilterAndArguments(configuration,FilterBase.class,args); assertEquals("org.apache.hadoop.hbase.filter.FilterBase",configuration.get(Import.FILTER_CLASS_CONF_KEY)); assertEquals("param1,param2",configuration.get(Import.FILTER_ARGS_CONF_KEY)); }

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
@Test public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Exception { TableName EXPORT_TABLE=TableName.valueOf("exportWithMultipleDeleteFamilyMarkersOfSameRowSameFamily"); HTableDescriptor desc=new HTableDescriptor(EXPORT_TABLE); desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE)); UTIL.getHBaseAdmin().createTable(desc); Table exportT=UTIL.getConnection().getTable(EXPORT_TABLE); Put p=new Put(ROW1); p.addColumn(FAMILYA,QUAL,now,QUAL); exportT.put(p); Delete d=new Delete(ROW1,now + 3); exportT.delete(d); p=new Put(ROW1); p.addColumn(FAMILYA,QUAL,now + 5,"s".getBytes()); exportT.put(p); d=new Delete(ROW1,now + 7); exportT.delete(d); String[] args=new String[]{"-D" + Export.RAW_SCAN + "=true",EXPORT_TABLE.getNameAsString(),FQ_OUTPUT_DIR,"1000"}; assertTrue(runExport(args)); String IMPORT_TABLE="importWithMultipleDeleteFamilyMarkersOfSameRowSameFamily"; desc=new HTableDescriptor(TableName.valueOf(IMPORT_TABLE)); desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE)); UTIL.getHBaseAdmin().createTable(desc); Table importT=UTIL.getConnection().getTable(TableName.valueOf(IMPORT_TABLE)); args=new String[]{IMPORT_TABLE,FQ_OUTPUT_DIR}; assertTrue(runImport(args)); Scan s=new Scan(); s.setMaxVersions(); s.setRaw(true); ResultScanner importedTScanner=importT.getScanner(s); Result importedTResult=importedTScanner.next(); ResultScanner exportedTScanner=exportT.getScanner(s); Result exportedTResult=exportedTScanner.next(); try { Result.compareResults(exportedTResult,importedTResult); } catch ( Exception e) { fail("Original and imported tables data comparision failed with error:" + e.getMessage()); } finally { exportT.close(); importT.close(); } }

InternalCallVerifier EqualityVerifier 
/** * Test map method of Importer */ @SuppressWarnings({"unchecked","rawtypes"}) @Test public void testKeyValueImporter() throws Exception { KeyValueImporter importer=new KeyValueImporter(); Configuration configuration=new Configuration(); Context ctx=mock(Context.class); when(ctx.getConfiguration()).thenReturn(configuration); doAnswer(new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { ImmutableBytesWritable writer=(ImmutableBytesWritable)invocation.getArguments()[0]; KeyValue key=(KeyValue)invocation.getArguments()[1]; assertEquals("Key",Bytes.toString(writer.get())); assertEquals("row",Bytes.toString(CellUtil.cloneRow(key))); return null; } } ).when(ctx).write(any(ImmutableBytesWritable.class),any(KeyValue.class)); importer.setup(ctx); Result value=mock(Result.class); KeyValue[] keys={new KeyValue(Bytes.toBytes("row"),Bytes.toBytes("family"),Bytes.toBytes("qualifier"),Bytes.toBytes("value")),new KeyValue(Bytes.toBytes("row"),Bytes.toBytes("family"),Bytes.toBytes("qualifier"),Bytes.toBytes("value1"))}; when(value.rawCells()).thenReturn(keys); importer.map(new ImmutableBytesWritable(Bytes.toBytes("Key")),value,ctx); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, * attempt with invalid values. */ @Test public void testWithFilter() throws Exception { String EXPORT_TABLE="exportSimpleCase_ImportWithFilter"; HTableDescriptor desc=new HTableDescriptor(TableName.valueOf(EXPORT_TABLE)); desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5)); UTIL.getHBaseAdmin().createTable(desc); Table exportTable=UTIL.getConnection().getTable(desc.getTableName()); Put p1=new Put(ROW1); p1.addColumn(FAMILYA,QUAL,now,QUAL); p1.addColumn(FAMILYA,QUAL,now + 1,QUAL); p1.addColumn(FAMILYA,QUAL,now + 2,QUAL); p1.addColumn(FAMILYA,QUAL,now + 3,QUAL); p1.addColumn(FAMILYA,QUAL,now + 4,QUAL); Put p2=new Put(ROW2); p2.addColumn(FAMILYA,QUAL,now,QUAL); exportTable.put(Arrays.asList(p1,p2)); String[] args=new String[]{EXPORT_TABLE,FQ_OUTPUT_DIR,"1000"}; assertTrue(runExport(args)); String IMPORT_TABLE="importWithFilter"; desc=new HTableDescriptor(TableName.valueOf(IMPORT_TABLE)); desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5)); UTIL.getHBaseAdmin().createTable(desc); Table importTable=UTIL.getConnection().getTable(desc.getTableName()); args=new String[]{"-D" + Import.FILTER_CLASS_CONF_KEY + "="+ PrefixFilter.class.getName(),"-D" + Import.FILTER_ARGS_CONF_KEY + "="+ Bytes.toString(ROW1),IMPORT_TABLE,FQ_OUTPUT_DIR,"1000"}; assertTrue(runImport(args)); PrefixFilter filter=new PrefixFilter(ROW1); int count=getCount(exportTable,filter); Assert.assertEquals("Unexpected row count between export and import tables",count,getCount(importTable,null)); args=new String[]{"-D" + Import.FILTER_CLASS_CONF_KEY + "="+ Filter.class.getName(),"-D" + Import.FILTER_ARGS_CONF_KEY + "="+ Bytes.toString(ROW1)+ "",EXPORT_TABLE,FQ_OUTPUT_DIR,"1000"}; assertFalse(runImport(args)); exportTable.close(); importTable.close(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test import data from 0.94 exported file * @throws Exception */ @Test public void testImport94Table() throws Exception { final String name="exportedTableIn94Format"; URL url=TestImportExport.class.getResource(name); File f=new File(url.toURI()); if (!f.exists()) { LOG.warn("FAILED TO FIND " + f + "; skipping out on test"); return; } assertTrue(f.exists()); LOG.info("FILE=" + f); Path importPath=new Path(f.toURI()); FileSystem fs=FileSystem.get(UTIL.getConfiguration()); fs.copyFromLocalFile(importPath,new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name)); String IMPORT_TABLE=name; try (Table t=UTIL.createTable(TableName.valueOf(IMPORT_TABLE),Bytes.toBytes("f1"),3)){ String[] args=new String[]{"-Dhbase.import.version=0.94",IMPORT_TABLE,FQ_OUTPUT_DIR}; assertTrue(runImport(args)); assertEquals(5,UTIL.countRows(t)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testWithDeletes() throws Exception { String EXPORT_TABLE="exportWithDeletes"; HTableDescriptor desc=new HTableDescriptor(TableName.valueOf(EXPORT_TABLE)); desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE)); UTIL.getHBaseAdmin().createTable(desc); try (Table t=UTIL.getConnection().getTable(desc.getTableName())){ Put p=new Put(ROW1); p.addColumn(FAMILYA,QUAL,now,QUAL); p.addColumn(FAMILYA,QUAL,now + 1,QUAL); p.addColumn(FAMILYA,QUAL,now + 2,QUAL); p.addColumn(FAMILYA,QUAL,now + 3,QUAL); p.addColumn(FAMILYA,QUAL,now + 4,QUAL); t.put(p); Delete d=new Delete(ROW1,now + 3); t.delete(d); d=new Delete(ROW1); d.addColumns(FAMILYA,QUAL,now + 2); t.delete(d); } String[] args=new String[]{"-D" + Export.RAW_SCAN + "=true",EXPORT_TABLE,FQ_OUTPUT_DIR,"1000"}; assertTrue(runExport(args)); String IMPORT_TABLE="importWithDeletes"; desc=new HTableDescriptor(TableName.valueOf(IMPORT_TABLE)); desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE)); UTIL.getHBaseAdmin().createTable(desc); try (Table t=UTIL.getConnection().getTable(desc.getTableName())){ args=new String[]{IMPORT_TABLE,FQ_OUTPUT_DIR}; assertTrue(runImport(args)); Scan s=new Scan(); s.setMaxVersions(); s.setRaw(true); ResultScanner scanner=t.getScanner(s); Result r=scanner.next(); Cell[] res=r.rawCells(); assertTrue(CellUtil.isDeleteFamily(res[0])); assertEquals(now + 4,res[1].getTimestamp()); assertEquals(now + 3,res[2].getTimestamp()); assertTrue(CellUtil.isDelete(res[3])); assertEquals(now + 2,res[4].getTimestamp()); assertEquals(now + 1,res[5].getTimestamp()); assertEquals(now,res[6].getTimestamp()); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test simple replication case with column mapping * @throws Exception */ @Test public void testSimpleCase() throws Exception { String EXPORT_TABLE="exportSimpleCase"; try (Table t=UTIL.createTable(TableName.valueOf(EXPORT_TABLE),FAMILYA,3)){ Put p=new Put(ROW1); p.addColumn(FAMILYA,QUAL,now,QUAL); p.addColumn(FAMILYA,QUAL,now + 1,QUAL); p.addColumn(FAMILYA,QUAL,now + 2,QUAL); t.put(p); p=new Put(ROW2); p.addColumn(FAMILYA,QUAL,now,QUAL); p.addColumn(FAMILYA,QUAL,now + 1,QUAL); p.addColumn(FAMILYA,QUAL,now + 2,QUAL); t.put(p); } String[] args=new String[]{EXPORT_TABLE,FQ_OUTPUT_DIR,"1000"}; assertTrue(runExport(args)); String IMPORT_TABLE="importTableSimpleCase"; try (Table t=UTIL.createTable(TableName.valueOf(IMPORT_TABLE),FAMILYB,3)){ args=new String[]{"-D" + Import.CF_RENAME_PROP + "="+ FAMILYA_STRING+ ":"+ FAMILYB_STRING,IMPORT_TABLE,FQ_OUTPUT_DIR}; assertTrue(runImport(args)); Get g=new Get(ROW1); g.setMaxVersions(); Result r=t.get(g); assertEquals(3,r.size()); g=new Get(ROW2); g.setMaxVersions(); r=t.get(g); assertEquals(3,r.size()); } }

Class: org.apache.hadoop.hbase.mapreduce.TestImportTsv

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testJobConfigurationsWithTsvImporterTextMapper() throws Exception { Path bulkOutputPath=new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); String INPUT_FILE="InputFile1.csv"; String[] args=new String[]{"-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper","-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B","-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,","-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "="+ bulkOutputPath.toString(),tn.getNameAsString(),INPUT_FILE}; assertEquals("running test job configuration failed.",0,ToolRunner.run(new Configuration(util.getConfiguration()),new ImportTsv(){ @Override public int run( String[] args) throws Exception { Job job=createSubmittableJob(getConf(),args); assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); assertTrue(job.getReducerClass().equals(TextSortReducer.class)); assertTrue(job.getMapOutputValueClass().equals(Text.class)); return 0; } } ,args)); util.deleteTable(tn); }

InternalCallVerifier EqualityVerifier 
@Test public void testMRWithoutAnExistingTable() throws Exception { String[] args=new String[]{tn.getNameAsString(),"/inputFile"}; exception.expect(TableNotFoundException.class); assertEquals("running test job configuration failed.",0,ToolRunner.run(new Configuration(util.getConfiguration()),new ImportTsv(){ @Override public int run( String[] args) throws Exception { createSubmittableJob(getConf(),args); return 0; } } ,args)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testJobConfigurationsWithDryMode() throws Exception { Path bulkOutputPath=new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); String INPUT_FILE="InputFile1.csv"; String[] argsArray=new String[]{"-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B","-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,","-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "="+ bulkOutputPath.toString(),"-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true",tn.getNameAsString(),INPUT_FILE}; assertEquals("running test job configuration failed.",0,ToolRunner.run(new Configuration(util.getConfiguration()),new ImportTsv(){ @Override public int run( String[] args) throws Exception { Job job=createSubmittableJob(getConf(),args); assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); return 0; } } ,argsArray)); util.deleteTable(tn); }

InternalCallVerifier EqualityVerifier 
@Test public void testWithoutAnExistingTableAndCreateTableSetToNo() throws Exception { String[] args=new String[]{tn.getNameAsString(),"/inputFile"}; Configuration conf=new Configuration(util.getConfiguration()); conf.set(ImportTsv.COLUMNS_CONF_KEY,"HBASE_ROW_KEY,FAM:A"); conf.set(ImportTsv.BULK_OUTPUT_CONF_KEY,"/output"); conf.set(ImportTsv.CREATE_TABLE_CONF_KEY,"no"); exception.expect(TableNotFoundException.class); assertEquals("running test job configuration failed.",0,ToolRunner.run(new Configuration(util.getConfiguration()),new ImportTsv(){ @Override public int run( String[] args) throws Exception { createSubmittableJob(getConf(),args); return 0; } } ,args)); }

Class: org.apache.hadoop.hbase.mapreduce.TestImportTsvParser

InternalCallVerifier EqualityVerifier ExceptionVerifier PublicFieldVerifier HybridVerifier 
@Test(expected=BadTsvLineException.class) public void testTsvParserInvalidTimestamp() throws BadTsvLineException { TsvParser parser=new TsvParser("HBASE_ROW_KEY,HBASE_TS_KEY,col_a,","\t"); assertEquals(1,parser.getTimestampKeyColumnIndex()); byte[] line=Bytes.toBytes("rowkey\ttimestamp\tval_a"); ParsedLine parsed=parser.parse(line,line.length); assertEquals(-1,parsed.getTimestamp(-1)); checkParsing(parsed,Splitter.on("\t").split(Bytes.toString(line))); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testTsvParser() throws BadTsvLineException { TsvParser parser=new TsvParser("col_a,col_b:qual,HBASE_ROW_KEY,col_d","\t"); assertBytesEquals(Bytes.toBytes("col_a"),parser.getFamily(0)); assertBytesEquals(HConstants.EMPTY_BYTE_ARRAY,parser.getQualifier(0)); assertBytesEquals(Bytes.toBytes("col_b"),parser.getFamily(1)); assertBytesEquals(Bytes.toBytes("qual"),parser.getQualifier(1)); assertNull(parser.getFamily(2)); assertNull(parser.getQualifier(2)); assertEquals(2,parser.getRowKeyColumnIndex()); assertEquals(TsvParser.DEFAULT_TIMESTAMP_COLUMN_INDEX,parser.getTimestampKeyColumnIndex()); byte[] line=Bytes.toBytes("val_a\tval_b\tval_c\tval_d"); ParsedLine parsed=parser.parse(line,line.length); checkParsing(parsed,Splitter.on("\t").split(Bytes.toString(line))); }

UtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test public void testTsvParseAttributesKey() throws BadTsvLineException { TsvParser parser=new TsvParser("HBASE_ROW_KEY,col_a,HBASE_TS_KEY,HBASE_ATTRIBUTES_KEY","\t"); assertEquals(0,parser.getRowKeyColumnIndex()); byte[] line=Bytes.toBytes("rowkey\tval_a\t1234\tkey=>value"); ParsedLine parse=parser.parse(line,line.length); assertEquals(18,parse.getAttributeKeyOffset()); assertEquals(3,parser.getAttributesKeyColumnIndex()); String attributes[]=parse.getIndividualAttributes(); assertEquals(attributes[0],"key=>value"); try { line=Bytes.toBytes("rowkey\tval_a\t1234"); parser.parse(line,line.length); fail("Should get BadTsvLineException on empty rowkey."); } catch ( BadTsvLineException b) { } parser=new TsvParser("HBASE_ATTRIBUTES_KEY,col_a,HBASE_ROW_KEY,HBASE_TS_KEY","\t"); assertEquals(2,parser.getRowKeyColumnIndex()); line=Bytes.toBytes("key=>value\tval_a\trowkey\t1234"); parse=parser.parse(line,line.length); assertEquals(0,parse.getAttributeKeyOffset()); assertEquals(0,parser.getAttributesKeyColumnIndex()); attributes=parse.getIndividualAttributes(); assertEquals(attributes[0],"key=>value"); try { line=Bytes.toBytes("val_a"); ParsedLine parse2=parser.parse(line,line.length); fail("Should get BadTsvLineException when number of columns less than rowkey position."); } catch ( BadTsvLineException b) { } parser=new TsvParser("col_a,HBASE_ATTRIBUTES_KEY,HBASE_TS_KEY,HBASE_ROW_KEY","\t"); assertEquals(3,parser.getRowKeyColumnIndex()); line=Bytes.toBytes("val_a\tkey0=>value0,key1=>value1,key2=>value2\t1234\trowkey"); parse=parser.parse(line,line.length); assertEquals(1,parser.getAttributesKeyColumnIndex()); assertEquals(6,parse.getAttributeKeyOffset()); String[] attr=parse.getIndividualAttributes(); int i=0; for ( String str : attr) { assertEquals(("key" + i + "=>"+ "value"+ i),str); i++; } }

InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test public void testTsvParserWithTimestamp() throws BadTsvLineException { TsvParser parser=new TsvParser("HBASE_ROW_KEY,HBASE_TS_KEY,col_a,","\t"); assertNull(parser.getFamily(0)); assertNull(parser.getQualifier(0)); assertNull(parser.getFamily(1)); assertNull(parser.getQualifier(1)); assertBytesEquals(Bytes.toBytes("col_a"),parser.getFamily(2)); assertBytesEquals(HConstants.EMPTY_BYTE_ARRAY,parser.getQualifier(2)); assertEquals(0,parser.getRowKeyColumnIndex()); assertEquals(1,parser.getTimestampKeyColumnIndex()); byte[] line=Bytes.toBytes("rowkey\t1234\tval_a"); ParsedLine parsed=parser.parse(line,line.length); assertEquals(1234l,parsed.getTimestamp(-1)); checkParsing(parsed,Splitter.on("\t").split(Bytes.toString(line))); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Exhaustively checks column-spec parsing for specs with and without HBASE_TS_KEY
// and HBASE_ATTRIBUTES_KEY: per-column family/qualifier, row-key index, timestamp
// presence/index, and attributes-key index.
@Test public void testTsvParserSpecParsing(){
  TsvParser parser;
  // Row key only: no families, no timestamp.
  parser=new TsvParser("HBASE_ROW_KEY","\t");
  assertNull(parser.getFamily(0));
  assertNull(parser.getQualifier(0));
  assertEquals(0,parser.getRowKeyColumnIndex());
  assertFalse(parser.hasTimestamp());
  // Row key plus one family:qualifier column.
  parser=new TsvParser("HBASE_ROW_KEY,col1:scol1","\t");
  assertNull(parser.getFamily(0));
  assertNull(parser.getQualifier(0));
  assertBytesEquals(Bytes.toBytes("col1"),parser.getFamily(1));
  assertBytesEquals(Bytes.toBytes("scol1"),parser.getQualifier(1));
  assertEquals(0,parser.getRowKeyColumnIndex());
  assertFalse(parser.hasTimestamp());
  // Two qualifiers under the same family.
  parser=new TsvParser("HBASE_ROW_KEY,col1:scol1,col1:scol2","\t");
  assertNull(parser.getFamily(0));
  assertNull(parser.getQualifier(0));
  assertBytesEquals(Bytes.toBytes("col1"),parser.getFamily(1));
  assertBytesEquals(Bytes.toBytes("scol1"),parser.getQualifier(1));
  assertBytesEquals(Bytes.toBytes("col1"),parser.getFamily(2));
  assertBytesEquals(Bytes.toBytes("scol2"),parser.getQualifier(2));
  assertEquals(0,parser.getRowKeyColumnIndex());
  assertFalse(parser.hasTimestamp());
  // Timestamp column between the data columns.
  parser=new TsvParser("HBASE_ROW_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2","\t");
  assertNull(parser.getFamily(0));
  assertNull(parser.getQualifier(0));
  assertBytesEquals(Bytes.toBytes("col1"),parser.getFamily(1));
  assertBytesEquals(Bytes.toBytes("scol1"),parser.getQualifier(1));
  assertBytesEquals(Bytes.toBytes("col1"),parser.getFamily(3));
  assertBytesEquals(Bytes.toBytes("scol2"),parser.getQualifier(3));
  assertEquals(0,parser.getRowKeyColumnIndex());
  assertTrue(parser.hasTimestamp());
  assertEquals(2,parser.getTimestampKeyColumnIndex());
  // Attributes key as the last column.
  parser=new TsvParser("HBASE_ROW_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2,HBASE_ATTRIBUTES_KEY","\t");
  assertNull(parser.getFamily(0));
  assertNull(parser.getQualifier(0));
  assertBytesEquals(Bytes.toBytes("col1"),parser.getFamily(1));
  assertBytesEquals(Bytes.toBytes("scol1"),parser.getQualifier(1));
  assertBytesEquals(Bytes.toBytes("col1"),parser.getFamily(3));
  assertBytesEquals(Bytes.toBytes("scol2"),parser.getQualifier(3));
  assertEquals(0,parser.getRowKeyColumnIndex());
  assertTrue(parser.hasTimestamp());
  assertEquals(2,parser.getTimestampKeyColumnIndex());
  assertEquals(4,parser.getAttributesKeyColumnIndex());
  // Attributes key as the first column, row key last.
  parser=new TsvParser("HBASE_ATTRIBUTES_KEY,col1:scol1,HBASE_TS_KEY,col1:scol2,HBASE_ROW_KEY","\t");
  assertNull(parser.getFamily(0));
  assertNull(parser.getQualifier(0));
  assertBytesEquals(Bytes.toBytes("col1"),parser.getFamily(1));
  assertBytesEquals(Bytes.toBytes("scol1"),parser.getQualifier(1));
  assertBytesEquals(Bytes.toBytes("col1"),parser.getFamily(3));
  assertBytesEquals(Bytes.toBytes("scol2"),parser.getQualifier(3));
  assertEquals(4,parser.getRowKeyColumnIndex());
  assertTrue(parser.hasTimestamp());
  assertEquals(2,parser.getTimestampKeyColumnIndex());
  assertEquals(0,parser.getAttributesKeyColumnIndex());
}

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
@Test public void testTsvParserWithCellVisibilityCol() throws BadTsvLineException { TsvParser parser=new TsvParser("HBASE_ROW_KEY,col_a,HBASE_TS_KEY,HBASE_ATTRIBUTES_KEY,HBASE_CELL_VISIBILITY","\t"); assertEquals(0,parser.getRowKeyColumnIndex()); assertEquals(4,parser.getCellVisibilityColumnIndex()); byte[] line=Bytes.toBytes("rowkey\tval_a\t1234\tkey=>value\tPRIVATE&SECRET"); ParsedLine parse=parser.parse(line,line.length); assertEquals(18,parse.getAttributeKeyOffset()); assertEquals(3,parser.getAttributesKeyColumnIndex()); String attributes[]=parse.getIndividualAttributes(); assertEquals(attributes[0],"key=>value"); assertEquals(29,parse.getCellVisibilityColumnOffset()); }

UtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Exercises TsvParser.parseRowKey(): row-key (offset, length) when the key is the first,
 * middle, and last column of the line, plus BadTsvLineException on an empty row key and on a
 * line with fewer columns than the configured row-key position.
 */
@Test public void testTsvParserParseRowKey() throws BadTsvLineException { TsvParser parser=new TsvParser("HBASE_ROW_KEY,col_a,HBASE_TS_KEY","\t"); assertEquals(0,parser.getRowKeyColumnIndex()); byte[] line=Bytes.toBytes("rowkey\tval_a\t1234"); Pair rowKeyOffsets=parser.parseRowKey(line,line.length); assertEquals(0,rowKeyOffsets.getFirst().intValue()); assertEquals(6,rowKeyOffsets.getSecond().intValue()); try { line=Bytes.toBytes("\t\tval_a\t1234"); parser.parseRowKey(line,line.length); fail("Should get BadTsvLineException on empty rowkey."); } catch ( BadTsvLineException b) { } parser=new TsvParser("col_a,HBASE_ROW_KEY,HBASE_TS_KEY","\t"); assertEquals(1,parser.getRowKeyColumnIndex()); line=Bytes.toBytes("val_a\trowkey\t1234"); rowKeyOffsets=parser.parseRowKey(line,line.length); assertEquals(6,rowKeyOffsets.getFirst().intValue()); assertEquals(6,rowKeyOffsets.getSecond().intValue()); try { line=Bytes.toBytes("val_a"); rowKeyOffsets=parser.parseRowKey(line,line.length); fail("Should get BadTsvLineException when number of columns less than rowkey position."); } catch ( BadTsvLineException b) { } parser=new TsvParser("col_a,HBASE_TS_KEY,HBASE_ROW_KEY","\t"); assertEquals(2,parser.getRowKeyColumnIndex()); line=Bytes.toBytes("val_a\t1234\trowkey"); rowKeyOffsets=parser.parseRowKey(line,line.length); assertEquals(11,rowKeyOffsets.getFirst().intValue()); assertEquals(6,rowKeyOffsets.getSecond().intValue()); }

Class: org.apache.hadoop.hbase.mapreduce.TestLoadIncrementalHFiles

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Writes a 1000-row HFile spanning "aaa".."zzz", splits it at row "ggg" via
 * LoadIncrementalHFiles.splitStoreFile(), and asserts the bottom and top output files together
 * still contain all 1000 rows.
 */
@Test(timeout=120000) public void testSplitStoreFile() throws IOException { Path dir=util.getDataTestDirOnTestFS("testSplitHFile"); FileSystem fs=util.getTestFileSystem(); Path testIn=new Path(dir,"testhfile"); HColumnDescriptor familyDesc=new HColumnDescriptor(FAMILY); HFileTestUtil.createHFile(util.getConfiguration(),fs,testIn,FAMILY,QUALIFIER,Bytes.toBytes("aaa"),Bytes.toBytes("zzz"),1000); Path bottomOut=new Path(dir,"bottom.out"); Path topOut=new Path(dir,"top.out"); LoadIncrementalHFiles.splitStoreFile(util.getConfiguration(),testIn,familyDesc,Bytes.toBytes("ggg"),bottomOut,topOut); int rowCount=verifyHFile(bottomOut); rowCount+=verifyHFile(topOut); assertEquals(1000,rowCount); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Bulk-loads an HFile written with tags into a table presplit at 6 keys (the HFile's row range
// "ddd".."ooo" crosses several splits, forcing hfile splitting during load), then verifies both
// the row count and, via HFileTestUtil.verifyTags, that the tags survived.
/** * Test that tags survive through a bulk load that needs to split hfiles. * This test depends on the "hbase.client.rpc.codec" = KeyValueCodecWithTags so that the client * can get tags in the responses. */ @Test(timeout=60000) public void testTagsSurviveBulkLoadSplit() throws Exception { Path dir=util.getDataTestDirOnTestFS(tn.getMethodName()); FileSystem fs=util.getTestFileSystem(); dir=dir.makeQualified(fs); Path familyDir=new Path(dir,Bytes.toString(FAMILY)); byte[][] tableSplitKeys=new byte[][]{Bytes.toBytes("aaa"),Bytes.toBytes("fff"),Bytes.toBytes("jjj"),Bytes.toBytes("ppp"),Bytes.toBytes("uuu"),Bytes.toBytes("zzz")}; byte[] from=Bytes.toBytes("ddd"); byte[] to=Bytes.toBytes("ooo"); HFileTestUtil.createHFileWithTags(util.getConfiguration(),fs,new Path(familyDir,tn.getMethodName() + "_hfile"),FAMILY,QUALIFIER,from,to,1000); int expectedRows=1000; TableName tableName=TableName.valueOf(tn.getMethodName()); HTableDescriptor htd=buildHTD(tableName,BloomType.NONE); util.getAdmin().createTable(htd,tableSplitKeys); LoadIncrementalHFiles loader=new LoadIncrementalHFiles(util.getConfiguration()); String[] args={dir.toString(),tableName.toString()}; loader.run(args); Table table=util.getConnection().getTable(tableName); try { assertEquals(expectedRows,util.countRows(table)); HFileTestUtil.verifyTags(table); } finally { table.close(); } util.deleteTable(tableName); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Bulk loads a 1000-row HFile into a table whose column family name starts with an underscore
 * ("_cf") and asserts all rows are present; the table handle is closed in the finally block.
 */
@Test(timeout=120000) public void testTableWithCFNameStartWithUnderScore() throws Exception { Path dir=util.getDataTestDirOnTestFS("cfNameStartWithUnderScore"); FileSystem fs=util.getTestFileSystem(); dir=dir.makeQualified(fs.getUri(),fs.getWorkingDirectory()); String family="_cf"; Path familyDir=new Path(dir,family); byte[] from=Bytes.toBytes("begin"); byte[] to=Bytes.toBytes("end"); Configuration conf=util.getConfiguration(); String tableName="mytable_cfNameStartWithUnderScore"; Table table=util.createTable(TableName.valueOf(tableName),family); HFileTestUtil.createHFile(conf,fs,new Path(familyDir,"hfile"),Bytes.toBytes(family),QUALIFIER,from,to,1000); LoadIncrementalHFiles loader=new LoadIncrementalHFiles(conf); String[] args={dir.toString(),tableName}; try { loader.run(args); assertEquals(1000,util.countRows(table)); } finally { if (null != table) { table.close(); } } }

Class: org.apache.hadoop.hbase.mapreduce.TestLoadIncrementalHFilesSplitRecovery

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Bulk-loads into a presplit table twice: the first load runs against an intact META and must
 * succeed; before the second load the first region's row is deleted from META (creating a
 * "hole"), so doBulkLoad is expected to fail with an IOException. A subclassed
 * LoadIncrementalHFiles counts the LoadQueueItems produced by groupOrSplit.
 *
 * Fixes relative to the previous version: the "exeception=" log-message typo is corrected, and
 * the Connection/Table opened at the top are now managed with try-with-resources so they are
 * released even when an assertion or load failure aborts the test midway (previously they were
 * only closed by explicit calls at the very end).
 */
@Test(timeout=120000) public void testGroupOrSplitWhenRegionHoleExistsInMeta() throws Exception {
  TableName tableName=TableName.valueOf("testGroupOrSplitWhenRegionHoleExistsInMeta");
  byte[][] SPLIT_KEYS=new byte[][]{Bytes.toBytes("row_00000100")};
  try (Connection connection=ConnectionFactory.createConnection(util.getConfiguration());
      Table table=connection.getTable(tableName)) {
    setupTableWithSplitkeys(tableName,10,SPLIT_KEYS);
    Path dir=buildBulkFiles(tableName,2);
    final AtomicInteger countedLqis=new AtomicInteger();
    // Counting wrapper: records how many LoadQueueItems groupOrSplit produces overall.
    LoadIncrementalHFiles loader=new LoadIncrementalHFiles(util.getConfiguration()){
      protected List groupOrSplit( Multimap regionGroups, final LoadQueueItem item, final Table htable, final Pair startEndKeys) throws IOException {
        List lqis=super.groupOrSplit(regionGroups,item,htable,startEndKeys);
        if (lqis != null) {
          countedLqis.addAndGet(lqis.size());
        }
        return lqis;
      }
    };
    // First load: META is intact, the load should succeed and the row count must match.
    try (Table t=connection.getTable(tableName);RegionLocator locator=connection.getRegionLocator(tableName);Admin admin=connection.getAdmin()){
      loader.doBulkLoad(dir,admin,t,locator);
    } catch ( Exception e) {
      LOG.error("exception=",e); // typo fixed: was "exeception="
    }
    this.assertExpectedTable(tableName,ROWCOUNT,2);
    // Punch a hole in META by deleting the region whose start key is empty (the first region).
    dir=buildBulkFiles(tableName,3);
    List regionInfos=MetaTableAccessor.getTableRegions(connection,tableName);
    for ( HRegionInfo regionInfo : regionInfos) {
      if (Bytes.equals(regionInfo.getStartKey(),HConstants.EMPTY_BYTE_ARRAY)) {
        MetaTableAccessor.deleteRegion(connection,regionInfo);
        break;
      }
    }
    // Second load: the META hole should surface as an IOException from doBulkLoad.
    try (Table t=connection.getTable(tableName);RegionLocator locator=connection.getRegionLocator(tableName);Admin admin=connection.getAdmin()){
      loader.doBulkLoad(dir,admin,t,locator);
    } catch ( Exception e) {
      LOG.error("exception=",e);
      assertTrue("IOException expected",e instanceof IOException);
    }
    regionInfos=MetaTableAccessor.getTableRegions(connection,tableName);
    assertTrue(regionInfos.size() >= 1);
    this.assertExpectedTable(connection,tableName,ROWCOUNT,2);
  }
}

Class: org.apache.hadoop.hbase.mapreduce.TestMapReduceExamples

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Verifies Uploader.map() emits the row key/Put for the CSV line "row,family,qualifier,value"
// (checked inside the mocked Context's write answer) and that SampleUploader.configureJob()
// selects SequenceFileInputFormat as the job's input format.
/** * Test SampleUploader from examples */ @SuppressWarnings("unchecked") @Test public void testSampleUploader() throws Exception { Configuration configuration=new Configuration(); Uploader uploader=new Uploader(); Mapper.Context ctx=mock(Context.class); doAnswer(new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { ImmutableBytesWritable writer=(ImmutableBytesWritable)invocation.getArguments()[0]; Put put=(Put)invocation.getArguments()[1]; assertEquals("row",Bytes.toString(writer.get())); assertEquals("row",Bytes.toString(put.getRow())); return null; } } ).when(ctx).write(any(ImmutableBytesWritable.class),any(Put.class)); uploader.map(null,new Text("row,family,qualifier,value"),ctx); Path dir=util.getDataTestDirOnTestFS("testSampleUploader"); String[] args={dir.toString(),"simpleTable"}; Job job=SampleUploader.configureJob(configuration,args); assertEquals(SequenceFileInputFormat.class,job.getInputFormatClass()); }

InternalCallVerifier EqualityVerifier 
// Checks IndexBuilder.configureJob() populates index.tablename / index.fields / INPUT_TABLE,
// then drives the mapper ("Map" here is IndexBuilder's inner mapper class, not java.util.Map)
// with a mocked Context+Result and asserts it writes key "tableName-column1" with row "test".
/** * Test IndexBuilder from examples */ @SuppressWarnings("unchecked") @Test public void testIndexBuilder() throws Exception { Configuration configuration=new Configuration(); String[] args={"tableName","columnFamily","column1","column2"}; IndexBuilder.configureJob(configuration,args); assertEquals("tableName",configuration.get("index.tablename")); assertEquals("tableName",configuration.get(TableInputFormat.INPUT_TABLE)); assertEquals("column1,column2",configuration.get("index.fields")); Map map=new Map(); ImmutableBytesWritable rowKey=new ImmutableBytesWritable(Bytes.toBytes("test")); Mapper.Context ctx=mock(Context.class); when(ctx.getConfiguration()).thenReturn(configuration); doAnswer(new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { ImmutableBytesWritable writer=(ImmutableBytesWritable)invocation.getArguments()[0]; Put put=(Put)invocation.getArguments()[1]; assertEquals("tableName-column1",Bytes.toString(writer.get())); assertEquals("test",Bytes.toString(put.getRow())); return null; } } ).when(ctx).write(any(ImmutableBytesWritable.class),any(Put.class)); Result result=mock(Result.class); when(result.getValue(Bytes.toBytes("columnFamily"),Bytes.toBytes("column1"))).thenReturn(Bytes.toBytes("test")); map.setup(ctx); map.map(rowKey,result,ctx); }

Class: org.apache.hadoop.hbase.mapreduce.TestMultiTableSnapshotInputFormatImpl

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies setInput() stores the snapshot-to-scans mapping in the configuration; scans are
 * converted via toScanWithEquals on both sides so map equality can be asserted.
 */
@Test public void testSetInputSetsSnapshotToScans() throws Exception { callSetInput(); Map> actual=subject.getSnapshotsToScans(conf); Map> actualWithEquals=toScanWithEquals(actual); Map> expectedWithEquals=toScanWithEquals(snapshotScans); assertEquals(expectedWithEquals,actualWithEquals); }

InternalCallVerifier EqualityVerifier 
/**
 * Verifies setInput() records a restore directory for every configured snapshot (restore-dir
 * map keys must equal the snapshot-scan map keys).
 */
@Test public void testSetInputPushesRestoreDirectories() throws Exception { callSetInput(); Map restoreDirs=subject.getSnapshotDirs(conf); assertEquals(this.snapshotScans.keySet(),restoreDirs.keySet()); }

Class: org.apache.hadoop.hbase.mapreduce.TestRowCounter

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests RowCounter.main() error paths: with no arguments, and with a malformed --range option,
 * it must print usage text to stderr and call System.exit(-1). A LauncherSecurityManager
 * intercepts the exit as a SecurityException so the exit code and stderr contents can be
 * asserted. stderr and the security manager are restored in the finally block.
 *
 * Fix relative to the previous version: System.setErr(new PrintStream(data)) was called twice
 * back-to-back, leaking the first PrintStream; it is now redirected exactly once.
 */
@Test
public void testImportMain() throws Exception {
  PrintStream oldPrintStream=System.err;
  SecurityManager SECURITY_MANAGER=System.getSecurityManager();
  LauncherSecurityManager newSecurityManager=new LauncherSecurityManager();
  System.setSecurityManager(newSecurityManager);
  ByteArrayOutputStream data=new ByteArrayOutputStream();
  String[] args={};
  // Redirect stderr once so usage output can be inspected.
  System.setErr(new PrintStream(data));
  try {
    try {
      RowCounter.main(args);
      fail("should be SecurityException");
    } catch ( SecurityException e) {
      assertEquals(-1,newSecurityManager.getExitCode());
      assertTrue(data.toString().contains("Wrong number of parameters:"));
      assertTrue(data.toString().contains("Usage: RowCounter [options] " + "[--starttime=[start] --endtime=[end] " + "[--range=[startKey],[endKey]] "+ "[ ...]"));
      assertTrue(data.toString().contains("-Dhbase.client.scanner.caching=100"));
      assertTrue(data.toString().contains("-Dmapreduce.map.speculative=false"));
    }
    data.reset();
    try {
      args=new String[2];
      args[0]="table";
      args[1]="--range=1"; // malformed: a range needs a comma separator
      RowCounter.main(args);
      fail("should be SecurityException");
    } catch ( SecurityException e) {
      assertEquals(-1,newSecurityManager.getExitCode());
      assertTrue(data.toString().contains("Please specify range in such format as \"--range=a,b\" or, with only one boundary," + " \"--range=,b\" or \"--range=a,\""));
      assertTrue(data.toString().contains("Usage: RowCounter [options] " + "[--starttime=[start] --endtime=[end] " + "[--range=[startKey],[endKey]] "+ "[ ...]"));
    }
  } finally {
    System.setErr(oldPrintStream);
    System.setSecurityManager(SECURITY_MANAGER);
  }
}

Class: org.apache.hadoop.hbase.mapreduce.TestSimpleTotalOrderPartitioner

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises SimpleTotalOrderPartitioner configured with start "a" and end "{": keys near the
 * start ("a", "c"), middle ("q"), and end ("z") of the range must map to the expected partition
 * for 1, 2, and 3 reducers.
 */
@Test public void testSplit() throws Exception { String start="a"; String end="{"; SimpleTotalOrderPartitioner p=new SimpleTotalOrderPartitioner(); this.conf.set(SimpleTotalOrderPartitioner.START,start); this.conf.set(SimpleTotalOrderPartitioner.END,end); p.setConf(this.conf); ImmutableBytesWritable c=new ImmutableBytesWritable(Bytes.toBytes("c")); int partition=p.getPartition(c,HConstants.EMPTY_BYTE_ARRAY,1); assertEquals(0,partition); partition=p.getPartition(c,HConstants.EMPTY_BYTE_ARRAY,2); assertEquals(0,partition); partition=p.getPartition(c,HConstants.EMPTY_BYTE_ARRAY,3); assertEquals(0,partition); ImmutableBytesWritable q=new ImmutableBytesWritable(Bytes.toBytes("q")); partition=p.getPartition(q,HConstants.EMPTY_BYTE_ARRAY,2); assertEquals(1,partition); partition=p.getPartition(q,HConstants.EMPTY_BYTE_ARRAY,3); assertEquals(2,partition); ImmutableBytesWritable startBytes=new ImmutableBytesWritable(Bytes.toBytes(start)); partition=p.getPartition(startBytes,HConstants.EMPTY_BYTE_ARRAY,2); assertEquals(0,partition); partition=p.getPartition(startBytes,HConstants.EMPTY_BYTE_ARRAY,3); assertEquals(0,partition); ImmutableBytesWritable endBytes=new ImmutableBytesWritable(Bytes.toBytes("z")); partition=p.getPartition(endBytes,HConstants.EMPTY_BYTE_ARRAY,2); assertEquals(1,partition); partition=p.getPartition(endBytes,HConstants.EMPTY_BYTE_ARRAY,3); assertEquals(2,partition); }

Class: org.apache.hadoop.hbase.mapreduce.TestSyncTable

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * End-to-end SyncTable run: writes divergent source/target tables, hashes the source, syncs the
 * target, then asserts the tables are equal (90 rows) and that each diff counter (rows with
 * diffs, missing rows/cells on either side, differing cell values) has its exact expected value.
 */
@Test public void testSyncTable() throws Exception { TableName sourceTableName=TableName.valueOf("testSourceTable"); TableName targetTableName=TableName.valueOf("testTargetTable"); Path testDir=TEST_UTIL.getDataTestDirOnTestFS("testSyncTable"); writeTestData(sourceTableName,targetTableName); hashSourceTable(sourceTableName,testDir); Counters syncCounters=syncTables(sourceTableName,targetTableName,testDir); assertEqualTables(90,sourceTableName,targetTableName); assertEquals(60,syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); assertEquals(10,syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue()); assertEquals(10,syncCounters.findCounter(Counter.TARGETMISSINGROWS).getValue()); assertEquals(50,syncCounters.findCounter(Counter.SOURCEMISSINGCELLS).getValue()); assertEquals(50,syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue()); assertEquals(20,syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue()); TEST_UTIL.deleteTable(sourceTableName); TEST_UTIL.deleteTable(targetTableName); TEST_UTIL.cleanupDataTestDirOnTestFS(); }

Class: org.apache.hadoop.hbase.mapreduce.TestTableInputFormatBase

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Resolves an IPv6-capable hostname and verifies TableInputFormat.reverseDNS() returns the same
 * canonical host name the JDK resolves. If the environment has no DNS/IPv6 connectivity
 * (UnknownHostException), the test returns early — there is nothing to verify.
 *
 * Fix relative to the previous version: the "Should retrun" typo in the console message and the
 * assertion message is corrected to "Should return".
 */
@Test
public void testTableInputFormatBaseReverseDNSForIPv6() throws UnknownHostException, NamingException {
  String address="ipv6.google.com";
  String localhost=null;
  InetAddress addr=null;
  TableInputFormat inputFormat=new TableInputFormat();
  try {
    localhost=InetAddress.getByName(address).getCanonicalHostName();
    addr=Inet6Address.getByName(address);
  } catch ( UnknownHostException e) {
    // No resolvable IPv6 host in this environment; skip quietly.
    return;
  }
  System.out.println("Should return the hostname for this host " + localhost + " addr : "+ addr);
  String actualHostName=inputFormat.reverseDNS(addr);
  assertEquals("Should return the hostname for this host. Expected : " + localhost + " Actual : "+ actualHostName,localhost,actualHostName);
}

Class: org.apache.hadoop.hbase.mapreduce.TestTableMapReduceUtil

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * initTableMapperJob with a byte[] table name and an explicit WALInputFormat: checks the job's
 * input format, mapper, output key/value classes, absent combiner, and the table name recorded
 * in the configuration. (The job output key stays the default LongWritable because
 * initTableMapperJob sets the MAP output key class, not the job output key class.)
 */
@Test public void testInitTableMapperJob2() throws Exception { Configuration configuration=new Configuration(); Job job=new Job(configuration,"tableName"); TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"),new Scan(),Import.Importer.class,Text.class,Text.class,job,false,WALInputFormat.class); assertEquals(WALInputFormat.class,job.getInputFormatClass()); assertEquals(Import.Importer.class,job.getMapperClass()); assertEquals(LongWritable.class,job.getOutputKeyClass()); assertEquals(Text.class,job.getOutputValueClass()); assertNull(job.getCombinerClass()); assertEquals("Table",job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * initTableMapperJob with a byte[] table name and no explicit input format: the job must
 * default to TableInputFormat; mapper, output classes, combiner absence and the configured
 * table name are also checked.
 */
@Test public void testInitTableMapperJob4() throws Exception { Configuration configuration=new Configuration(); Job job=new Job(configuration,"tableName"); TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"),new Scan(),Import.Importer.class,Text.class,Text.class,job,false); assertEquals(TableInputFormat.class,job.getInputFormatClass()); assertEquals(Import.Importer.class,job.getMapperClass()); assertEquals(LongWritable.class,job.getOutputKeyClass()); assertEquals(Text.class,job.getOutputValueClass()); assertNull(job.getCombinerClass()); assertEquals("Table",job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * initTableMapperJob shortest overload (byte[] table, scan, mapper, key/value classes, job):
 * verifies the same defaults as the other overloads — TableInputFormat, configured mapper and
 * output classes, no combiner, and the table name in the configuration.
 */
@Test public void testInitTableMapperJob3() throws Exception { Configuration configuration=new Configuration(); Job job=new Job(configuration,"tableName"); TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"),new Scan(),Import.Importer.class,Text.class,Text.class,job); assertEquals(TableInputFormat.class,job.getInputFormatClass()); assertEquals(Import.Importer.class,job.getMapperClass()); assertEquals(LongWritable.class,job.getOutputKeyClass()); assertEquals(Text.class,job.getOutputValueClass()); assertNull(job.getCombinerClass()); assertEquals("Table",job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * initTableMapperJob with a String table name and explicit WALInputFormat: same assertions as
 * the byte[] overload — input format, mapper, output classes, absent combiner, and the table
 * name stored under TableInputFormat.INPUT_TABLE.
 */
@Test public void testInitTableMapperJob1() throws Exception { Configuration configuration=new Configuration(); Job job=new Job(configuration,"tableName"); TableMapReduceUtil.initTableMapperJob("Table",new Scan(),Import.Importer.class,Text.class,Text.class,job,false,WALInputFormat.class); assertEquals(WALInputFormat.class,job.getInputFormatClass()); assertEquals(Import.Importer.class,job.getMapperClass()); assertEquals(LongWritable.class,job.getOutputKeyClass()); assertEquals(Text.class,job.getOutputValueClass()); assertNull(job.getCombinerClass()); assertEquals("Table",job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); }

Class: org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat

InternalCallVerifier EqualityVerifier 
/**
 * After initTableSnapshotMapperJob, the job configuration must use the default LruBlockCache
 * size and must not enable BucketCache (hbase.bucketcache.size == 0). Snapshot and table are
 * cleaned up (and the cluster torn down) in the finally block.
 */
@Test public void testInitTableSnapshotMapperJobConfig() throws Exception { setupCluster(); TableName tableName=TableName.valueOf("testInitTableSnapshotMapperJobConfig"); String snapshotName="foo"; try { createTableAndSnapshot(UTIL,tableName,snapshotName,getStartRow(),getEndRow(),1); Job job=new Job(UTIL.getConfiguration()); Path tmpTableDir=UTIL.getDataTestDirOnTestFS(snapshotName); TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,new Scan(),TestTableSnapshotMapper.class,ImmutableBytesWritable.class,NullWritable.class,job,false,tmpTableDir); Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.",HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT,job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,-1),0.01); Assert.assertEquals("Snapshot job should not use BucketCache.",0,job.getConfiguration().getFloat("hbase.bucketcache.size",-1),0.01); } finally { UTIL.getHBaseAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); tearDownCluster(); } }

InternalCallVerifier EqualityVerifier 
/**
 * Exercises TableSnapshotInputFormatImpl.getBestLocations() as host block weights accumulate:
 * hosts are returned ordered by weight, and hosts whose weight lags the leader are omitted
 * (e.g. h2 at weight 7 vs h1 at 10 is excluded until h2 reaches 9 — presumably a relative
 * weight cutoff; confirm against the getBestLocations implementation).
 */
@Test public void testGetBestLocations() throws IOException { TableSnapshotInputFormatImpl tsif=new TableSnapshotInputFormatImpl(); Configuration conf=UTIL.getConfiguration(); HDFSBlocksDistribution blockDistribution=new HDFSBlocksDistribution(); Assert.assertEquals(Lists.newArrayList(),TableSnapshotInputFormatImpl.getBestLocations(conf,blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[]{"h1"},1); Assert.assertEquals(Lists.newArrayList("h1"),TableSnapshotInputFormatImpl.getBestLocations(conf,blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[]{"h1"},1); Assert.assertEquals(Lists.newArrayList("h1"),TableSnapshotInputFormatImpl.getBestLocations(conf,blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[]{"h2"},1); Assert.assertEquals(Lists.newArrayList("h1"),TableSnapshotInputFormatImpl.getBestLocations(conf,blockDistribution)); blockDistribution=new HDFSBlocksDistribution(); blockDistribution.addHostsAndBlockWeight(new String[]{"h1"},10); blockDistribution.addHostsAndBlockWeight(new String[]{"h2"},7); blockDistribution.addHostsAndBlockWeight(new String[]{"h3"},5); blockDistribution.addHostsAndBlockWeight(new String[]{"h4"},1); Assert.assertEquals(Lists.newArrayList("h1"),TableSnapshotInputFormatImpl.getBestLocations(conf,blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[]{"h2"},2); Assert.assertEquals(Lists.newArrayList("h1","h2"),TableSnapshotInputFormatImpl.getBestLocations(conf,blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[]{"h2"},3); Assert.assertEquals(Lists.newArrayList("h2","h1"),TableSnapshotInputFormatImpl.getBestLocations(conf,blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[]{"h3"},6); blockDistribution.addHostsAndBlockWeight(new String[]{"h4"},9); Assert.assertEquals(Lists.newArrayList("h2","h3","h4","h1"),TableSnapshotInputFormatImpl.getBestLocations(conf,blockDistribution)); }

Class: org.apache.hadoop.hbase.mapreduce.TestWALPlayer

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Writes a put (two columns) and a delete (one column) to table1, rolls the WAL, replays the log
// directory into table2 via WALPlayer, and checks table2 holds exactly one cell — the column
// that was not deleted. Also exercises WALPlayer.setupTime() option parsing along the way.
/** * Simple end-to-end test * @throws Exception */ @Test public void testWALPlayer() throws Exception { final TableName TABLENAME1=TableName.valueOf("testWALPlayer1"); final TableName TABLENAME2=TableName.valueOf("testWALPlayer2"); final byte[] FAMILY=Bytes.toBytes("family"); final byte[] COLUMN1=Bytes.toBytes("c1"); final byte[] COLUMN2=Bytes.toBytes("c2"); final byte[] ROW=Bytes.toBytes("row"); Table t1=TEST_UTIL.createTable(TABLENAME1,FAMILY); Table t2=TEST_UTIL.createTable(TABLENAME2,FAMILY); Put p=new Put(ROW); p.addColumn(FAMILY,COLUMN1,COLUMN1); p.addColumn(FAMILY,COLUMN2,COLUMN2); t1.put(p); Delete d=new Delete(ROW); d.addColumns(FAMILY,COLUMN1); t1.delete(d); WAL log=cluster.getRegionServer(0).getWAL(null); log.rollWriter(); String walInputDir=new Path(cluster.getMaster().getMasterFileSystem().getRootDir(),HConstants.HREGION_LOGDIR_NAME).toString(); Configuration configuration=TEST_UTIL.getConfiguration(); WALPlayer player=new WALPlayer(configuration); String optionName="_test_.name"; configuration.set(optionName,"1000"); player.setupTime(configuration,optionName); assertEquals(1000,configuration.getLong(optionName,0)); assertEquals(0,ToolRunner.run(configuration,player,new String[]{walInputDir,TABLENAME1.getNameAsString(),TABLENAME2.getNameAsString()})); Get g=new Get(ROW); Result r=t2.get(g); assertEquals(1,r.size()); assertTrue(CellUtil.matchingQualifier(r.rawCells()[0],COLUMN2)); }

Class: org.apache.hadoop.hbase.mapreduce.TestWALRecordReader

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Appends two edits, rolls the WAL, appends two more with later timestamps, then verifies
// WALInputFormat.getSplits() honors START_TIME_KEY/END_TIME_KEY: an end time of ts yields only
// the first file, and a [ts+1, ts1+1] window yields both files with the expected rows.
/** * Test partial reads from the log based on passed time range * @throws Exception */ @Test public void testPartialRead() throws Exception { final WALFactory walfactory=new WALFactory(conf,null,getName()); WAL log=walfactory.getWAL(info.getEncodedNameAsBytes(),info.getTable().getNamespace()); long ts=System.currentTimeMillis(); WALEdit edit=new WALEdit(); edit.add(new KeyValue(rowName,family,Bytes.toBytes("1"),ts,value)); log.append(htd,info,getWalKey(ts),edit,true); edit=new WALEdit(); edit.add(new KeyValue(rowName,family,Bytes.toBytes("2"),ts + 1,value)); log.append(htd,info,getWalKey(ts + 1),edit,true); log.sync(); LOG.info("Before 1st WAL roll " + log.toString()); log.rollWriter(); LOG.info("Past 1st WAL roll " + log.toString()); Thread.sleep(1); long ts1=System.currentTimeMillis(); edit=new WALEdit(); edit.add(new KeyValue(rowName,family,Bytes.toBytes("3"),ts1 + 1,value)); log.append(htd,info,getWalKey(ts1 + 1),edit,true); edit=new WALEdit(); edit.add(new KeyValue(rowName,family,Bytes.toBytes("4"),ts1 + 2,value)); log.append(htd,info,getWalKey(ts1 + 2),edit,true); log.sync(); log.shutdown(); walfactory.shutdown(); LOG.info("Closed WAL " + log.toString()); WALInputFormat input=new WALInputFormat(); Configuration jobConf=new Configuration(conf); jobConf.set("mapreduce.input.fileinputformat.inputdir",logDir.toString()); jobConf.setLong(WALInputFormat.END_TIME_KEY,ts); List splits=input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(1,splits.size()); testSplit(splits.get(0),Bytes.toBytes("1")); jobConf.setLong(WALInputFormat.START_TIME_KEY,ts + 1); jobConf.setLong(WALInputFormat.END_TIME_KEY,ts1 + 1); splits=input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(2,splits.size()); testSplit(splits.get(0),Bytes.toBytes("2")); testSplit(splits.get(1),Bytes.toBytes("3")); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Writes one edit per WAL file (rolling in between), then checks WALInputFormat split counts and
// contents: both files with no filter, only the first with an end time before the roll, and both
// (empty reads via testSplit with no expected row) with a start time after all appends.
/** * Test basic functionality * @throws Exception */ @Test public void testWALRecordReader() throws Exception { final WALFactory walfactory=new WALFactory(conf,null,getName()); WAL log=walfactory.getWAL(info.getEncodedNameAsBytes(),info.getTable().getNamespace()); byte[] value=Bytes.toBytes("value"); final AtomicLong sequenceId=new AtomicLong(0); WALEdit edit=new WALEdit(); edit.add(new KeyValue(rowName,family,Bytes.toBytes("1"),System.currentTimeMillis(),value)); long txid=log.append(htd,info,getWalKey(System.currentTimeMillis()),edit,true); log.sync(txid); Thread.sleep(1); long secondTs=System.currentTimeMillis(); log.rollWriter(); edit=new WALEdit(); edit.add(new KeyValue(rowName,family,Bytes.toBytes("2"),System.currentTimeMillis(),value)); txid=log.append(htd,info,getWalKey(System.currentTimeMillis()),edit,true); log.sync(txid); log.shutdown(); walfactory.shutdown(); long thirdTs=System.currentTimeMillis(); WALInputFormat input=new WALInputFormat(); Configuration jobConf=new Configuration(conf); jobConf.set("mapreduce.input.fileinputformat.inputdir",logDir.toString()); List splits=input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(2,splits.size()); testSplit(splits.get(0),Bytes.toBytes("1")); testSplit(splits.get(1),Bytes.toBytes("2")); jobConf.setLong(WALInputFormat.END_TIME_KEY,secondTs - 1); splits=input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(1,splits.size()); testSplit(splits.get(0),Bytes.toBytes("1")); jobConf.setLong(WALInputFormat.END_TIME_KEY,Long.MAX_VALUE); jobConf.setLong(WALInputFormat.START_TIME_KEY,thirdTs); splits=input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(2,splits.size()); testSplit(splits.get(0)); testSplit(splits.get(1)); }

Class: org.apache.hadoop.hbase.master.TestActiveMasterManager

InternalCallVerifier BooleanVerifier 
/**
 * Simulates a master restart: the same ServerName becomes active master twice in a row. The
 * second blockUntilBecomingActiveMaster must succeed because the existing ZK master node
 * carries this master's own address.
 * NOTE(review): the ZooKeeperWatcher and dummy masters are never closed here — presumably
 * cleaned up by test teardown; confirm.
 */
@Test public void testRestartMaster() throws IOException, KeeperException { ZooKeeperWatcher zk=new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),"testActiveMasterManagerFromZK",null,true); try { ZKUtil.deleteNode(zk,zk.getMasterAddressZNode()); ZKUtil.deleteNode(zk,zk.clusterStateZNode); } catch ( KeeperException.NoNodeException nne) { } ServerName master=ServerName.valueOf("localhost",1,System.currentTimeMillis()); DummyMaster dummyMaster=new DummyMaster(zk,master); ClusterStatusTracker clusterStatusTracker=dummyMaster.getClusterStatusTracker(); ActiveMasterManager activeMasterManager=dummyMaster.getActiveMasterManager(); assertFalse(activeMasterManager.clusterHasActiveMaster.get()); MonitoredTask status=Mockito.mock(MonitoredTask.class); clusterStatusTracker.setClusterUp(); activeMasterManager.blockUntilBecomingActiveMaster(100,status); assertTrue(activeMasterManager.clusterHasActiveMaster.get()); assertMaster(zk,master); DummyMaster secondDummyMaster=new DummyMaster(zk,master); ActiveMasterManager secondActiveMasterManager=secondDummyMaster.getActiveMasterManager(); assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get()); activeMasterManager.blockUntilBecomingActiveMaster(100,status); assertTrue(activeMasterManager.clusterHasActiveMaster.get()); assertMaster(zk,master); }

InternalCallVerifier BooleanVerifier 
// Drives master failover purely through ZK: master 1 becomes active, a second master blocks
// waiting (WaitToBeMasterThread), master 1's ZK node is deleted, and the waiter is polled (up to
// ~1s) until it takes over as active master; its node is then deleted as cleanup.
/** * Unit tests that uses ZooKeeper but does not use the master-side methods * but rather acts directly on ZK. * @throws Exception */ @Test public void testActiveMasterManagerFromZK() throws Exception { ZooKeeperWatcher zk=new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),"testActiveMasterManagerFromZK",null,true); try { ZKUtil.deleteNode(zk,zk.getMasterAddressZNode()); ZKUtil.deleteNode(zk,zk.clusterStateZNode); } catch ( KeeperException.NoNodeException nne) { } ServerName firstMasterAddress=ServerName.valueOf("localhost",1,System.currentTimeMillis()); ServerName secondMasterAddress=ServerName.valueOf("localhost",2,System.currentTimeMillis()); DummyMaster ms1=new DummyMaster(zk,firstMasterAddress); ActiveMasterManager activeMasterManager=ms1.getActiveMasterManager(); assertFalse(activeMasterManager.clusterHasActiveMaster.get()); ClusterStatusTracker clusterStatusTracker=ms1.getClusterStatusTracker(); clusterStatusTracker.setClusterUp(); activeMasterManager.blockUntilBecomingActiveMaster(100,Mockito.mock(MonitoredTask.class)); assertTrue(activeMasterManager.clusterHasActiveMaster.get()); assertMaster(zk,firstMasterAddress); WaitToBeMasterThread t=new WaitToBeMasterThread(zk,secondMasterAddress); t.start(); int sleeps=0; while (!t.manager.clusterHasActiveMaster.get() && sleeps < 100) { Thread.sleep(10); sleeps++; } assertTrue(activeMasterManager.clusterHasActiveMaster.get()); assertTrue(t.manager.clusterHasActiveMaster.get()); assertFalse(t.isActiveMaster); ms1.stop("stopping first server"); NodeDeletionListener listener=new NodeDeletionListener(zk,zk.getMasterAddressZNode()); zk.registerListener(listener); LOG.info("Deleting master node"); ZKUtil.deleteNode(zk,zk.getMasterAddressZNode()); LOG.info("Waiting for active master manager to be notified"); listener.waitForDeletion(); LOG.info("Master node deleted"); sleeps=0; while (!t.isActiveMaster && sleeps < 100) { Thread.sleep(10); sleeps++; } LOG.debug("Slept " + sleeps + " times"); 
assertTrue(t.manager.clusterHasActiveMaster.get()); assertTrue(t.isActiveMaster); LOG.info("Deleting master node"); ZKUtil.deleteNode(zk,zk.getMasterAddressZNode()); }

Class: org.apache.hadoop.hbase.master.TestAssignmentListener

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies that an AssignmentListener registered on the master's
 * AssignmentManager receives the expected open ("load") and close
 * notifications for table creation, region split, region merge, and
 * table deletion.
 */
@Test(timeout=60000)
public void testAssignmentListener() throws IOException, InterruptedException {
  AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
  Admin admin = TEST_UTIL.getHBaseAdmin();
  DummyAssignmentListener listener = new DummyAssignmentListener();
  am.registerListener(listener);
  try {
    final String TABLE_NAME_STR = "testtb";
    final TableName TABLE_NAME = TableName.valueOf(TABLE_NAME_STR);
    final byte[] FAMILY = Bytes.toBytes("cf");

    // Create: the single new region produces one open and no close.
    LOG.info("Create Table");
    TEST_UTIL.createTable(TABLE_NAME, FAMILY);
    listener.awaitModifications(1);
    assertEquals(1, listener.getLoadCount());
    assertEquals(0, listener.getCloseCount());

    // Load a few rows so the split below has data to operate on.
    Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
    try {
      for (int i = 0; i < 10; ++i) {
        byte[] key = Bytes.toBytes("row-" + i);
        Put put = new Put(key);
        put.addColumn(FAMILY, null, key);
        table.put(put);
      }
    } finally {
      table.close();
    }

    // Split: parent closes (1 close) and the two daughters open (2 loads).
    LOG.info("Split Table");
    listener.reset();
    admin.split(TABLE_NAME, Bytes.toBytes("row-3"));
    listener.awaitModifications(3);
    assertEquals(2, listener.getLoadCount());
    assertEquals(1, listener.getCloseCount());

    // Wait until both daughters report mergeable, major-compacting in the
    // loop (presumably to clear split references — see HRegion.isMergeable).
    MiniHBaseCluster miniCluster = TEST_UTIL.getMiniHBaseCluster();
    int mergeable = 0;
    while (mergeable < 2) {
      Thread.sleep(100);
      admin.majorCompact(TABLE_NAME);
      mergeable = 0;
      for (JVMClusterUtil.RegionServerThread regionThread : miniCluster.getRegionServerThreads()) {
        for (Region region : regionThread.getRegionServer().getOnlineRegions(TABLE_NAME)) {
          mergeable += ((HRegion) region).isMergeable() ? 1 : 0;
        }
      }
    }

    // Merge: both regions close (2 closes) and the merged region opens (1 load).
    LOG.info("Merge Regions");
    listener.reset();
    // NOTE(review): restored the element type stripped from this declaration;
    // Admin.getTableRegions() returns List<HRegionInfo>, and the calls on
    // regions.get(...) below require it.
    List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
    assertEquals(2, regions.size());
    admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
        regions.get(1).getEncodedNameAsBytes(), true);
    listener.awaitModifications(3);
    assertEquals(1, admin.getTableRegions(TABLE_NAME).size());
    assertEquals(1, listener.getLoadCount());
    assertEquals(2, listener.getCloseCount());

    // Drop: the remaining region closes.
    LOG.info("Drop Table");
    listener.reset();
    TEST_UTIL.deleteTable(TABLE_NAME);
    listener.awaitModifications(1);
    assertEquals(0, listener.getLoadCount());
    assertEquals(1, listener.getCloseCount());
  } finally {
    am.unregisterListener(listener);
  }
}

InternalCallVerifier EqualityVerifier 
@Test(timeout=60000) public void testServerListener() throws IOException, InterruptedException { ServerManager serverManager=TEST_UTIL.getHBaseCluster().getMaster().getServerManager(); DummyServerListener listener=new DummyServerListener(); serverManager.registerListener(listener); try { MiniHBaseCluster miniCluster=TEST_UTIL.getMiniHBaseCluster(); miniCluster.startRegionServer(); listener.awaitModifications(1); assertEquals(1,listener.getAddedCount()); assertEquals(0,listener.getRemovedCount()); listener.reset(); miniCluster.startRegionServer(); listener.awaitModifications(1); assertEquals(1,listener.getAddedCount()); assertEquals(0,listener.getRemovedCount()); int nrs=miniCluster.getRegionServerThreads().size(); listener.reset(); miniCluster.stopRegionServer(nrs - 1); listener.awaitModifications(1); assertEquals(0,listener.getAddedCount()); assertEquals(1,listener.getRemovedCount()); listener.reset(); miniCluster.stopRegionServer(nrs - 2); listener.awaitModifications(1); assertEquals(0,listener.getAddedCount()); assertEquals(1,listener.getRemovedCount()); } finally { serverManager.unregisterListener(listener); } }

Class: org.apache.hadoop.hbase.master.TestAssignmentManagerOnCluster

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test disabled region is ignored by SSH: once the table is disabled and the
 * hosting server is killed, server shutdown handling must leave the region
 * offline instead of re-assigning it.
 */
@Test(timeout=60000)
public void testAssignDisabledRegionBySSH() throws Exception {
  TableName table = TableName.valueOf("testAssignDisabledRegionBySSH");
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  MyMaster master;
  try {
    // Create the table and register an extra region [A, Z) directly in meta.
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);
    master = (MyMaster) cluster.getMaster();
    master.assignRegion(hri);
    AssignmentManager am = master.getAssignmentManager();
    RegionStates regionStates = am.getRegionStates();
    ServerName metaServer = regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO);
    ServerName oldServerName = null;
    // Keep moving the region until it is hosted away from the meta server,
    // then mark that host as the server MyRegionServer will treat as aborted.
    while (true) {
      assertTrue(am.waitForAssignment(hri));
      RegionState state = regionStates.getRegionState(hri);
      oldServerName = state.getServerName();
      if (!ServerName.isSameHostnameAndPort(oldServerName, metaServer)) {
        MyRegionServer.abortedServer = oldServerName;
        break;
      }
      int i = cluster.getServerWithMeta();
      HRegionServer rs = cluster.getRegionServer(i == 0 ? 1 : 0);
      oldServerName = rs.getServerName();
      master.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(oldServerName.getServerName()));
    }
    assertTrue(regionStates.isRegionOnline(hri));
    assertEquals(oldServerName, regionStates.getRegionServerOfRegion(hri));
    // Disable the table, then kill the hosting server and wait for server
    // shutdown handling to completely process the dead server.
    master.disableTable(hri.getTable(), HConstants.NO_NONCE, HConstants.NO_NONCE);
    cluster.killRegionServer(oldServerName);
    cluster.waitForRegionServerToStop(oldServerName, -1);
    ServerManager serverManager = master.getServerManager();
    while (!serverManager.isServerDead(oldServerName)
        || serverManager.getDeadServers().areDeadServersInProgress()) {
      Thread.sleep(100);
    }
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    // The disabled region must stay offline, not be re-assigned.
    assertTrue(regionStates.isRegionOffline(hri));
  } finally {
    MyRegionServer.abortedServer = null;
    TEST_UTIL.deleteTable(table);
    cluster.startRegionServer();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test(timeout=60000) public void testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState() throws Exception { final TableName table=TableName.valueOf("testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState"); AssignmentManager am=TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(); HRegionInfo hri=null; ServerName serverName=null; try { hri=createTableAndGetOneRegion(table); serverName=am.getRegionStates().getRegionServerOfRegion(hri); ServerName destServerName=null; HRegionServer destServer=null; for (int i=0; i < 3; i++) { destServer=TEST_UTIL.getHBaseCluster().getRegionServer(i); if (!destServer.getServerName().equals(serverName)) { destServerName=destServer.getServerName(); break; } } am.regionOffline(hri); am.getRegionStates().updateRegionState(hri,RegionState.State.PENDING_OPEN,destServerName); am.getTableStateManager().setTableState(table,TableState.State.DISABLING); List toAssignRegions=am.cleanOutCrashedServerReferences(destServerName); assertTrue("Regions to be assigned should be empty.",toAssignRegions.isEmpty()); assertTrue("Regions to be assigned should be empty.",am.getRegionStates().getRegionState(hri).isOffline()); } finally { if (hri != null && serverName != null) { am.regionOnline(hri,serverName); } am.getTableStateManager().setTableState(table,TableState.State.ENABLED); TEST_UTIL.getHBaseAdmin().disableTable(table); TEST_UTIL.deleteTable(table); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * This tests region open failed */ @Test(timeout=60000) public void testOpenFailed() throws Exception { TableName table=TableName.valueOf("testOpenFailed"); try { HTableDescriptor desc=new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); Table meta=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); HRegionInfo hri=new HRegionInfo(desc.getTableName(),Bytes.toBytes("A"),Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta,hri); MyLoadBalancer.controledRegion=hri.getEncodedName(); HMaster master=TEST_UTIL.getHBaseCluster().getMaster(); master.assignRegion(hri); AssignmentManager am=master.getAssignmentManager(); assertFalse(am.waitForAssignment(hri)); RegionState state=am.getRegionStates().getRegionState(hri); assertEquals(RegionState.State.FAILED_OPEN,state.getState()); assertNull(state.getServerName()); MyLoadBalancer.controledRegion=null; master.assignRegion(hri); assertTrue(am.waitForAssignment(hri)); ServerName serverName=master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri,serverName,200); } finally { MyLoadBalancer.controledRegion=null; TEST_UTIL.deleteTable(table); } }

InternalCallVerifier BooleanVerifier 
/** * This tests region close hanging */ @Test(timeout=60000) public void testCloseHang() throws Exception { TableName table=TableName.valueOf("testCloseHang"); try { HTableDescriptor desc=new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); Table meta=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); HRegionInfo hri=new HRegionInfo(desc.getTableName(),Bytes.toBytes("A"),Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta,hri); HMaster master=TEST_UTIL.getHBaseCluster().getMaster(); master.assignRegion(hri); AssignmentManager am=master.getAssignmentManager(); assertTrue(am.waitForAssignment(hri)); ServerName sn=am.getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri,sn,6000); MyRegionObserver.postCloseEnabled.set(true); am.unassign(hri); MyRegionObserver.postCloseEnabled.set(false); am.waitOnRegionToClearRegionsInTransition(hri); assertTrue(am.waitForAssignment(hri)); ServerName serverName=master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri,serverName,200); } finally { MyRegionObserver.postCloseEnabled.set(false); TEST_UTIL.deleteTable(table); } }

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/** * This tests offlining a region */ @Test(timeout=60000) public void testOfflineRegion() throws Exception { TableName table=TableName.valueOf("testOfflineRegion"); try { HRegionInfo hri=createTableAndGetOneRegion(table); RegionStates regionStates=TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); ServerName serverName=regionStates.getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri,serverName,200); admin.offline(hri.getRegionName()); long timeoutTime=System.currentTimeMillis() + 800; while (true) { if (regionStates.getRegionByStateOfTable(table).get(RegionState.State.OFFLINE).contains(hri)) break; long now=System.currentTimeMillis(); if (now > timeoutTime) { fail("Failed to offline the region in time"); break; } Thread.sleep(10); } RegionState regionState=regionStates.getRegionState(hri); assertTrue(regionState.isOffline()); } finally { TEST_UTIL.deleteTable(table); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * This tests restarting meta regionserver */ @Test(timeout=180000) public void testRestartMetaRegionServer() throws Exception { MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); boolean stoppedARegionServer=false; try { HMaster master=cluster.getMaster(); RegionStates regionStates=master.getAssignmentManager().getRegionStates(); ServerName metaServerName=regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO); if (master.getServerName().equals(metaServerName)) { metaServerName=cluster.getLiveRegionServerThreads().get(0).getRegionServer().getServerName(); master.move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),Bytes.toBytes(metaServerName.getServerName())); TEST_UTIL.waitUntilNoRegionsInTransition(60000); } RegionState metaState=MetaTableLocator.getMetaRegionState(master.getZooKeeper()); assertEquals("Meta should be not in transition",metaState.getState(),RegionState.State.OPEN); assertNotEquals("Meta should be moved off master",metaState.getServerName(),master.getServerName()); assertEquals("Meta should be on the meta server",metaState.getServerName(),metaServerName); cluster.killRegionServer(metaServerName); stoppedARegionServer=true; cluster.waitForRegionServerToStop(metaServerName,60000); final ServerName oldServerName=metaServerName; final ServerManager serverManager=master.getServerManager(); TEST_UTIL.waitFor(120000,200,new Waiter.Predicate(){ @Override public boolean evaluate() throws Exception { return serverManager.isServerDead(oldServerName) && !serverManager.areDeadServersInProgress(); } } ); TEST_UTIL.waitUntilNoRegionsInTransition(60000); assertTrue("Meta should be assigned",regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO)); metaState=MetaTableLocator.getMetaRegionState(master.getZooKeeper()); assertEquals("Meta should be not in transition",metaState.getState(),RegionState.State.OPEN); assertEquals("Meta should be 
assigned",metaState.getServerName(),regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO)); assertNotEquals("Meta should be assigned on a different server",metaState.getServerName(),metaServerName); } finally { if (stoppedARegionServer) { cluster.startRegionServer(); } } }

InternalCallVerifier BooleanVerifier 
/** * Test force unassign/assign a region of a disabled table */ @Test(timeout=60000) public void testAssignDisabledRegion() throws Exception { TableName table=TableName.valueOf("testAssignDisabledRegion"); MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); MyMaster master=null; try { HTableDescriptor desc=new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); Table meta=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); HRegionInfo hri=new HRegionInfo(desc.getTableName(),Bytes.toBytes("A"),Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta,hri); master=(MyMaster)cluster.getMaster(); master.assignRegion(hri); AssignmentManager am=master.getAssignmentManager(); RegionStates regionStates=am.getRegionStates(); assertTrue(am.waitForAssignment(hri)); admin.disableTable(table); assertTrue(regionStates.isRegionOffline(hri)); am.assign(hri,true); assertTrue(regionStates.isRegionOffline(hri)); am.unassign(hri); assertTrue(regionStates.isRegionOffline(hri)); } finally { TEST_UTIL.deleteTable(table); } }

IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Test force unassign/assign a region hosted on a dead server: with SSH
 * disabled the region sticks in FAILED_CLOSE; re-enabling SSH lets the
 * region be cleaned up and assigned to a live server.
 */
@Test(timeout=60000)
public void testAssignRacingWithSSH() throws Exception {
  TableName table = TableName.valueOf("testAssignRacingWithSSH");
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  MyMaster master = null;
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);
    master = (MyMaster) cluster.getMaster();
    master.assignRegion(hri);
    // Suspend server shutdown handling so the dead server is not processed yet.
    master.enableSSH(false);
    AssignmentManager am = master.getAssignmentManager();
    RegionStates regionStates = am.getRegionStates();
    ServerName metaServer = regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO);
    // Keep moving the region until it is hosted away from the meta server,
    // then kill that host.
    while (true) {
      assertTrue(am.waitForAssignment(hri));
      RegionState state = regionStates.getRegionState(hri);
      ServerName oldServerName = state.getServerName();
      if (!ServerName.isSameHostnameAndPort(oldServerName, metaServer)) {
        cluster.killRegionServer(oldServerName);
        cluster.waitForRegionServerToStop(oldServerName, -1);
        break;
      }
      int i = cluster.getServerWithMeta();
      HRegionServer rs = cluster.getRegionServer(i == 0 ? 1 : 0);
      oldServerName = rs.getServerName();
      master.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(oldServerName.getServerName()));
    }
    // With SSH off, both forced assign and unassign leave the region FAILED_CLOSE.
    am.assign(hri, true);
    RegionState state = regionStates.getRegionState(hri);
    assertTrue(state.isFailedClose());
    am.unassign(hri);
    state = regionStates.getRegionState(hri);
    assertTrue(state.isFailedClose());
    // Re-enable SSH; the region must clear transition and land on one server.
    master.enableSSH(true);
    am.waitOnRegionToClearRegionsInTransition(hri);
    assertTrue(am.waitForAssignment(hri));
    ServerName serverName =
        master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 6000);
  } finally {
    if (master != null) {
      master.enableSSH(true);
    }
    TEST_UTIL.deleteTable(table);
    cluster.startRegionServer();
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/** * This tests moving a region */ @Test(timeout=50000) public void testMoveRegion() throws Exception { TableName table=TableName.valueOf("testMoveRegion"); try { HRegionInfo hri=createTableAndGetOneRegion(table); HMaster master=TEST_UTIL.getHBaseCluster().getMaster(); RegionStates regionStates=master.getAssignmentManager().getRegionStates(); ServerName serverName=regionStates.getRegionServerOfRegion(hri); ServerManager serverManager=master.getServerManager(); ServerName destServerName=null; List regionServers=TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads(); for ( JVMClusterUtil.RegionServerThread regionServer : regionServers) { HRegionServer destServer=regionServer.getRegionServer(); destServerName=destServer.getServerName(); if (!destServerName.equals(serverName) && serverManager.isServerOnline(destServerName)) { break; } } assertTrue(destServerName != null && !destServerName.equals(serverName)); TEST_UTIL.getHBaseAdmin().move(hri.getEncodedNameAsBytes(),Bytes.toBytes(destServerName.getServerName())); long timeoutTime=System.currentTimeMillis() + 30000; while (true) { ServerName sn=regionStates.getRegionServerOfRegion(hri); if (sn != null && sn.equals(destServerName)) { TEST_UTIL.assertRegionOnServer(hri,sn,200); break; } long now=System.currentTimeMillis(); if (now > timeoutTime) { fail("Failed to move the region in time: " + regionStates.getRegionState(hri)); } regionStates.waitForUpdate(50); } } finally { TEST_UTIL.deleteTable(table); } }

InternalCallVerifier BooleanVerifier PublicFieldVerifier 
/**
 * This tests region assignment on a simulated restarted server: a region is
 * deliberately planned onto a server entry with an older start code (a dead
 * incarnation) and must still end up assigned after retries.
 */
@Test(timeout=120000)
public void testAssignRegionOnRestartedServer() throws Exception {
  TableName table = TableName.valueOf("testAssignRegionOnRestartedServer");
  // Raise the retry budget and bounce the master so the setting takes effect.
  TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 20);
  TEST_UTIL.getMiniHBaseCluster().stopMaster(0);
  TEST_UTIL.getMiniHBaseCluster().startMaster();
  ServerName deadServer = null;
  HMaster master = null;
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    final HRegionInfo hri =
        new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);
    master = TEST_UTIL.getHBaseCluster().getMaster();
    // NOTE(review): restored the element type stripped from this declaration;
    // without it iterator().next() yields Object and the code does not compile.
    Set<ServerName> onlineServers = master.serverManager.getOnlineServers().keySet();
    assertFalse("There should be some servers online", onlineServers.isEmpty());
    // Fabricate a "restarted" incarnation: same host/port, older start code.
    ServerName destServer = onlineServers.iterator().next();
    deadServer = ServerName.valueOf(destServer.getHostname(), destServer.getPort(),
        destServer.getStartcode() - 100L);
    master.serverManager.recordNewServerWithLock(deadServer, ServerLoad.EMPTY_SERVERLOAD);
    final AssignmentManager am = master.getAssignmentManager();
    // Plan the region onto the dead incarnation, then assign.
    RegionPlan plan = new RegionPlan(hri, null, deadServer);
    am.addPlan(hri.getEncodedName(), plan);
    master.assignRegion(hri);
    // NOTE(review): restored the type argument stripped from this anonymous class.
    TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return !am.getRegionStates().isRegionInTransition(hri);
      }
    });
    assertFalse("Region should be assigned", am.getRegionStates().isRegionInTransition(hri));
  } finally {
    if (deadServer != null) {
      master.serverManager.expireServer(deadServer);
    }
    TEST_UTIL.deleteTable(table);
    // Restore the retry budget and bounce the master again.
    TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 3);
    ServerName masterServerName = TEST_UTIL.getMiniHBaseCluster().getMaster().getServerName();
    TEST_UTIL.getMiniHBaseCluster().stopMaster(masterServerName);
    TEST_UTIL.getMiniHBaseCluster().startMaster();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * This tests region open failure which is not recoverable */ @Test(timeout=60000) public void testOpenFailedUnrecoverable() throws Exception { TableName table=TableName.valueOf("testOpenFailedUnrecoverable"); try { HTableDescriptor desc=new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); Table meta=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); HRegionInfo hri=new HRegionInfo(desc.getTableName(),Bytes.toBytes("A"),Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta,hri); FileSystem fs=FileSystem.get(conf); Path tableDir=FSUtils.getTableDir(FSUtils.getRootDir(conf),table); Path regionDir=new Path(tableDir,hri.getEncodedName()); fs.create(regionDir,true); HMaster master=TEST_UTIL.getHBaseCluster().getMaster(); master.assignRegion(hri); AssignmentManager am=master.getAssignmentManager(); assertFalse(am.waitForAssignment(hri)); RegionState state=am.getRegionStates().getRegionState(hri); assertEquals(RegionState.State.FAILED_OPEN,state.getState()); assertNotNull(state.getServerName()); fs.delete(regionDir,true); master.assignRegion(hri); assertTrue(am.waitForAssignment(hri)); ServerName serverName=master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri,serverName,200); } finally { TEST_UTIL.deleteTable(table); } }

InternalCallVerifier BooleanVerifier 
/** * This tests region assignment */ @Test(timeout=60000) public void testAssignRegion() throws Exception { TableName table=TableName.valueOf("testAssignRegion"); try { HTableDescriptor desc=new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); Table meta=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); HRegionInfo hri=new HRegionInfo(desc.getTableName(),Bytes.toBytes("A"),Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta,hri); HMaster master=TEST_UTIL.getHBaseCluster().getMaster(); master.assignRegion(hri); AssignmentManager am=master.getAssignmentManager(); am.waitForAssignment(hri); RegionStates regionStates=am.getRegionStates(); ServerName serverName=regionStates.getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri,serverName,200); TEST_UTIL.getHBaseAdmin().assign(hri.getRegionName()); master.getAssignmentManager().waitForAssignment(hri); RegionState newState=regionStates.getRegionState(hri); assertTrue(newState.isOpened()); } finally { TEST_UTIL.deleteTable(table); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * This tests region close failed */ @Test(timeout=60000) public void testCloseFailed() throws Exception { TableName table=TableName.valueOf("testCloseFailed"); try { HTableDescriptor desc=new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); Table meta=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); HRegionInfo hri=new HRegionInfo(desc.getTableName(),Bytes.toBytes("A"),Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta,hri); HMaster master=TEST_UTIL.getHBaseCluster().getMaster(); master.assignRegion(hri); AssignmentManager am=master.getAssignmentManager(); assertTrue(am.waitForAssignment(hri)); ServerName sn=am.getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri,sn,6000); MyRegionObserver.preCloseEnabled.set(true); am.unassign(hri); RegionState state=am.getRegionStates().getRegionState(hri); assertEquals(RegionState.State.FAILED_CLOSE,state.getState()); MyRegionObserver.preCloseEnabled.set(false); am.unassign(hri); am.waitOnRegionToClearRegionsInTransition(hri); assertTrue(am.waitForAssignment(hri)); ServerName serverName=master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri,serverName,200); } finally { MyRegionObserver.preCloseEnabled.set(false); TEST_UTIL.deleteTable(table); } }

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/** * Test SSH waiting for extra region server for assignment */ @Test(timeout=300000) public void testSSHWaitForServerToAssignRegion() throws Exception { TableName table=TableName.valueOf("testSSHWaitForServerToAssignRegion"); MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); boolean startAServer=false; try { HTableDescriptor desc=new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); HMaster master=cluster.getMaster(); final ServerManager serverManager=master.getServerManager(); MyLoadBalancer.countRegionServers=Integer.valueOf(serverManager.countOfRegionServers()); HRegionServer rs=TEST_UTIL.getRSForFirstRegionInTable(table); assertNotNull("First region should be assigned",rs); final ServerName serverName=rs.getServerName(); int counter=MyLoadBalancer.counter.get() + 5; cluster.killRegionServer(serverName); startAServer=true; cluster.waitForRegionServerToStop(serverName,-1); while (counter > MyLoadBalancer.counter.get()) { Thread.sleep(1000); } cluster.startRegionServer(); startAServer=false; TEST_UTIL.waitFor(120000,1000,new Waiter.Predicate(){ @Override public boolean evaluate() throws Exception { return serverManager.isServerDead(serverName) && !serverManager.areDeadServersInProgress(); } } ); TEST_UTIL.waitUntilNoRegionsInTransition(300000); rs=TEST_UTIL.getRSForFirstRegionInTable(table); assertTrue("First region should be re-assigned to a different server",rs != null && !serverName.equals(rs.getServerName())); } finally { MyLoadBalancer.countRegionServers=null; TEST_UTIL.deleteTable(table); if (startAServer) { cluster.startRegionServer(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** * Test that region state transition call is idempotent */ @Test(timeout=60000) public void testReportRegionStateTransition() throws Exception { TableName table=TableName.valueOf("testReportRegionStateTransition"); try { MyRegionServer.simulateRetry=true; HTableDescriptor desc=new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); Table meta=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); HRegionInfo hri=new HRegionInfo(desc.getTableName(),Bytes.toBytes("A"),Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta,hri); HMaster master=TEST_UTIL.getHBaseCluster().getMaster(); master.assignRegion(hri); AssignmentManager am=master.getAssignmentManager(); am.waitForAssignment(hri); RegionStates regionStates=am.getRegionStates(); ServerName serverName=regionStates.getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri,serverName,200); admin.disableTable(table); assertTrue(regionStates.isRegionOffline(hri)); List regions=TEST_UTIL.getHBaseAdmin().getOnlineRegions(serverName); assertTrue(!regions.contains(hri)); } finally { MyRegionServer.simulateRetry=false; TEST_UTIL.deleteTable(table); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * This tests assign a region while it's closing. */ @Test(timeout=60000) public void testAssignWhileClosing() throws Exception { TableName table=TableName.valueOf("testAssignWhileClosing"); try { HTableDescriptor desc=new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); Table meta=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); HRegionInfo hri=new HRegionInfo(desc.getTableName(),Bytes.toBytes("A"),Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta,hri); HMaster master=TEST_UTIL.getHBaseCluster().getMaster(); master.assignRegion(hri); AssignmentManager am=master.getAssignmentManager(); assertTrue(am.waitForAssignment(hri)); ServerName sn=am.getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri,sn,6000); MyRegionObserver.preCloseEnabled.set(true); am.unassign(hri); RegionState state=am.getRegionStates().getRegionState(hri); assertEquals(RegionState.State.FAILED_CLOSE,state.getState()); MyRegionObserver.preCloseEnabled.set(false); am.unassign(hri); am.assign(hri,true); am.waitOnRegionToClearRegionsInTransition(hri); assertTrue(am.waitForAssignment(hri)); ServerName serverName=master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnlyOnServer(hri,serverName,200); } finally { MyRegionObserver.preCloseEnabled.set(false); TEST_UTIL.deleteTable(table); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/** * If a table is deleted, we should not be able to move it anymore. * Otherwise, the region will be brought back. * @throws Exception */ @Test(timeout=50000) public void testMoveRegionOfDeletedTable() throws Exception { TableName table=TableName.valueOf("testMoveRegionOfDeletedTable"); Admin admin=TEST_UTIL.getHBaseAdmin(); try { HRegionInfo hri=createTableAndGetOneRegion(table); HMaster master=TEST_UTIL.getHBaseCluster().getMaster(); AssignmentManager am=master.getAssignmentManager(); RegionStates regionStates=am.getRegionStates(); ServerName serverName=regionStates.getRegionServerOfRegion(hri); ServerName destServerName=null; for (int i=0; i < 3; i++) { HRegionServer destServer=TEST_UTIL.getHBaseCluster().getRegionServer(i); if (!destServer.getServerName().equals(serverName)) { destServerName=destServer.getServerName(); break; } } assertTrue(destServerName != null && !destServerName.equals(serverName)); TEST_UTIL.deleteTable(table); try { admin.move(hri.getEncodedNameAsBytes(),Bytes.toBytes(destServerName.getServerName())); fail("We should not find the region"); } catch ( IOException ioe) { assertTrue(ioe instanceof UnknownRegionException); } am.balance(new RegionPlan(hri,serverName,destServerName)); assertFalse("The region should not be in transition",regionStates.isRegionInTransition(hri)); } finally { if (admin.tableExists(table)) { TEST_UTIL.deleteTable(table); } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * This tests region close racing with open: an unassign is issued while the
 * region is still OPENING (postOpen hook held), then the region is assigned
 * to a different server and must end up on exactly one server.
 */
@Test(timeout=60000)
public void testOpenCloseRacing() throws Exception {
  TableName table = TableName.valueOf("testOpenCloseRacing");
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);
    meta.close();
    // Hold the region in OPENING via the postOpen hook.
    MyRegionObserver.postOpenEnabled.set(true);
    MyRegionObserver.postOpenCalled = false;
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    master.assignRegion(hri);
    // Wait (up to 20s) for the open to reach the postOpen hook.
    long end = EnvironmentEdgeManager.currentTime() + 20000;
    while (!MyRegionObserver.postOpenCalled) {
      assertFalse("Timed out waiting for postOpen to be called",
          EnvironmentEdgeManager.currentTime() > end);
      Thread.sleep(300);
    }
    // Race: unassign while the region is still opening.
    AssignmentManager am = master.getAssignmentManager();
    am.unassign(hri);
    RegionState state = am.getRegionStates().getRegionState(hri);
    ServerName oldServerName = state.getServerName();
    assertTrue(state.isOpening() && oldServerName != null);
    MyRegionObserver.postOpenEnabled.set(false);
    // Assign to a server different from the one the open is stuck on.
    ServerName destServerName = null;
    int numRS = TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size();
    for (int i = 0; i < numRS; i++) {
      HRegionServer destServer = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      if (!destServer.getServerName().equals(oldServerName)) {
        destServerName = destServer.getServerName();
        break;
      }
    }
    assertNotNull(destServerName);
    assertFalse("Region should be assigned on a new region server",
        oldServerName.equals(destServerName));
    List regions = new ArrayList();
    regions.add(hri);
    am.assign(destServerName, regions);
    // The region must settle on exactly one server.
    am.waitOnRegionToClearRegionsInTransition(hri);
    assertTrue(am.waitForAssignment(hri));
    ServerName serverName =
        master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 6000);
  } finally {
    MyRegionObserver.postOpenEnabled.set(false);
    TEST_UTIL.deleteTable(table);
  }
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that a region left offlined by a region-server crash is re-assigned
 * by the server shutdown handler (SSH).
 *
 * The region is first moved onto a server that does NOT host hbase:meta
 * (so killing it cannot take meta down too), that server is killed, and the
 * test waits for SSH to finish and verifies the region comes back online.
 */
@Test(timeout=60000) public void testAssignOfflinedRegionBySSH() throws Exception { TableName table=TableName.valueOf("testAssignOfflinedRegionBySSH"); MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); MyMaster master=null; try { HTableDescriptor desc=new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc);
// Hand-insert an extra region into hbase:meta and assign it.
Table meta=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); HRegionInfo hri=new HRegionInfo(desc.getTableName(),Bytes.toBytes("A"),Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta,hri); master=(MyMaster)cluster.getMaster(); master.assignRegion(hri); AssignmentManager am=master.getAssignmentManager(); RegionStates regionStates=am.getRegionStates(); ServerName metaServer=regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO); ServerName oldServerName=null;
// Keep moving the region until it lands on a server other than the one
// hosting meta; flag that server via MyRegionServer.abortedServer
// (presumably consumed by the MyRegionServer test hook — see that class).
while (true) { assertTrue(am.waitForAssignment(hri)); RegionState state=regionStates.getRegionState(hri); oldServerName=state.getServerName(); if (!ServerName.isSameHostnameAndPort(oldServerName,metaServer)) { MyRegionServer.abortedServer=oldServerName; break; } int i=cluster.getServerWithMeta(); HRegionServer rs=cluster.getRegionServer(i == 0 ? 1 : 0); oldServerName=rs.getServerName(); master.move(hri.getEncodedNameAsBytes(),Bytes.toBytes(oldServerName.getServerName())); } assertTrue(regionStates.isRegionOnline(hri)); assertEquals(oldServerName,regionStates.getRegionServerOfRegion(hri));
// Kill the hosting server and wait for the dead-server processing (SSH)
// to fully complete before checking the region again.
cluster.killRegionServer(oldServerName); cluster.waitForRegionServerToStop(oldServerName,-1); ServerManager serverManager=master.getServerManager(); while (!serverManager.isServerDead(oldServerName) || serverManager.getDeadServers().areDeadServersInProgress()) { Thread.sleep(100); } am.waitOnRegionToClearRegionsInTransition(hri); assertTrue(am.waitForAssignment(hri)); ServerName serverName=master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnlyOnServer(hri,serverName,200); } finally {
// Clear the abort flag, drop the table and replace the killed server.
MyRegionServer.abortedServer=null; TEST_UTIL.deleteTable(table); cluster.startRegionServer(); } }

Class: org.apache.hadoop.hbase.master.TestCatalogJanitor

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Tests that archiving a store file whose name collides with an already
 * archived file causes the previously archived copy to be preserved as a
 * timestamped backup rather than being overwritten.
 *
 * Achieved by cleaning the same split parent twice with identically named
 * mock store files each time.
 */
@Test public void testDuplicateHFileResolution() throws Exception { String table="table"; HBaseTestingUtility htu=new HBaseTestingUtility(); setRootDirAndCleanIt(htu,"testCleanParent"); Server server=new MockServer(htu); MasterServices services=new MockMasterServices(server); CatalogJanitor janitor=new CatalogJanitor(server,services);
// A parent region fully covered by two daughters, so cleanParent() can
// archive and delete it.
HTableDescriptor htd=new HTableDescriptor(TableName.valueOf(table)); htd.addFamily(new HColumnDescriptor("f")); HRegionInfo parent=new HRegionInfo(htd.getTableName(),Bytes.toBytes("aaa"),Bytes.toBytes("eee")); HRegionInfo splita=new HRegionInfo(htd.getTableName(),Bytes.toBytes("aaa"),Bytes.toBytes("ccc")); HRegionInfo splitb=new HRegionInfo(htd.getTableName(),Bytes.toBytes("ccc"),Bytes.toBytes("eee")); Result r=createResult(parent,splita,splitb);
// Resolve the parent's store dir and its expected archive location.
FileSystem fs=FileSystem.get(htu.getConfiguration()); Path rootdir=services.getMasterFileSystem().getRootDir(); FSUtils.setRootDir(fs.getConf(),rootdir); Path tabledir=FSUtils.getTableDir(rootdir,parent.getTable()); Path storedir=HStore.getStoreHomedir(tabledir,parent,htd.getColumnFamilies()[0].getName()); System.out.println("Old root:" + rootdir); System.out.println("Old table:" + tabledir); System.out.println("Old store:" + storedir); Path storeArchive=HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(),parent,tabledir,htd.getColumnFamilies()[0].getName()); System.out.println("Old archive:" + storeArchive);
// First pass: clean the parent and check the files landed in the archive.
addMockStoreFiles(2,services,storedir); FileStatus[] storeFiles=fs.listStatus(storedir); assertTrue(janitor.cleanParent(parent,r)); FileStatus[] archivedStoreFiles=fs.listStatus(storeArchive); assertArchiveEqualToOriginal(storeFiles,archivedStoreFiles,fs);
// Second pass with same-named files: the earlier archived copies must be
// kept as timestamped backups (hence the trailing 'true' argument).
addMockStoreFiles(2,services,storedir); assertTrue(janitor.cleanParent(parent,r)); archivedStoreFiles=fs.listStatus(storeArchive); assertArchiveEqualToOriginal(storeFiles,archivedStoreFiles,fs,true); services.stop("Test finished"); server.stop("shutdown"); janitor.cancel(true); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test public void testCleanParent() throws IOException, InterruptedException { HBaseTestingUtility htu=new HBaseTestingUtility(); setRootDirAndCleanIt(htu,"testCleanParent"); Server server=new MockServer(htu); try { MasterServices services=new MockMasterServices(server); CatalogJanitor janitor=new CatalogJanitor(server,services); HTableDescriptor htd=new HTableDescriptor(TableName.valueOf("table")); htd.addFamily(new HColumnDescriptor("f")); HRegionInfo parent=new HRegionInfo(htd.getTableName(),Bytes.toBytes("aaa"),Bytes.toBytes("eee")); HRegionInfo splita=new HRegionInfo(htd.getTableName(),Bytes.toBytes("aaa"),Bytes.toBytes("ccc")); HRegionInfo splitb=new HRegionInfo(htd.getTableName(),Bytes.toBytes("ccc"),Bytes.toBytes("eee")); Result r=createResult(parent,splita,splitb); Path rootdir=services.getMasterFileSystem().getRootDir(); Path tabledir=FSUtils.getTableDir(rootdir,htd.getTableName()); Path storedir=HStore.getStoreHomedir(tabledir,splita,htd.getColumnFamilies()[0].getName()); Reference ref=Reference.createTopReference(Bytes.toBytes("ccc")); long now=System.currentTimeMillis(); Path p=new Path(storedir,Long.toString(now) + "." + parent.getEncodedName()); FileSystem fs=services.getMasterFileSystem().getFileSystem(); Path path=ref.write(fs,p); assertTrue(fs.exists(path)); assertFalse(janitor.cleanParent(parent,r)); assertTrue(fs.delete(p,true)); assertTrue(janitor.cleanParent(parent,r)); } finally { server.stop("shutdown"); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * CatalogJanitor.scan() should not clean parent regions if their own * parents are still referencing them. This ensures that grandfather regions * do not point to deleted parent regions. */ @Test public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception { HBaseTestingUtility htu=new HBaseTestingUtility(); setRootDirAndCleanIt(htu,"testScanDoesNotCleanRegionsWithExistingParents"); Server server=new MockServer(htu); MasterServices services=new MockMasterServices(server); final HTableDescriptor htd=createHTableDescriptor(); HRegionInfo parent=new HRegionInfo(htd.getTableName(),Bytes.toBytes("aaa"),new byte[0],true); Thread.sleep(1001); HRegionInfo splita=new HRegionInfo(htd.getTableName(),Bytes.toBytes("aaa"),Bytes.toBytes("ccc"),true); Thread.sleep(1001); HRegionInfo splitaa=new HRegionInfo(htd.getTableName(),Bytes.toBytes("aaa"),Bytes.toBytes("bbb"),false); HRegionInfo splitab=new HRegionInfo(htd.getTableName(),Bytes.toBytes("bbb"),Bytes.toBytes("ccc"),false); HRegionInfo splitb=new HRegionInfo(htd.getTableName(),Bytes.toBytes("ccc"),new byte[0]); Thread.sleep(1001); final Map splitParents=new TreeMap(new SplitParentFirstComparator()); splitParents.put(parent,createResult(parent,splita,splitb)); splita.setOffline(true); splitParents.put(splita,createResult(splita,splitaa,splitab)); final Map mergedRegions=new TreeMap(); CatalogJanitor janitor=spy(new CatalogJanitor(server,services)); doReturn(new Triple,Map>(10,mergedRegions,splitParents)).when(janitor).getMergedRegionsAndSplitParents(); Path splitaRef=createReferences(services,htd,parent,splita,Bytes.toBytes("ccc"),false); assertEquals(0,janitor.scan()); FileSystem fs=FileSystem.get(htu.getConfiguration()); assertTrue(fs.delete(splitaRef,true)); assertEquals(2,janitor.scan()); services.stop("test finished"); janitor.cancel(true); }

InternalCallVerifier BooleanVerifier 
/**
 * Tests SplitParentFirstComparator: a region must sort before the regions it
 * split into, equal regions compare as 0, and regions covering a wider range
 * (earlier split generations) sort before narrower ones.
 *
 * (The previous javadoc — about archiving store files — was a copy/paste
 * from another test and did not describe this method.)
 */
@Test public void testSplitParentFirstComparator(){ SplitParentFirstComparator comp=new SplitParentFirstComparator(); final HTableDescriptor htd=createHTableDescriptor();
// Generation 0: the whole-keyspace root region and its first split.
HRegionInfo rootRegion=new HRegionInfo(htd.getTableName(),HConstants.EMPTY_START_ROW,HConstants.EMPTY_END_ROW,true); HRegionInfo firstRegion=new HRegionInfo(htd.getTableName(),HConstants.EMPTY_START_ROW,Bytes.toBytes("bbb"),true); HRegionInfo lastRegion=new HRegionInfo(htd.getTableName(),Bytes.toBytes("bbb"),HConstants.EMPTY_END_ROW,true); assertTrue(comp.compare(rootRegion,rootRegion) == 0); assertTrue(comp.compare(firstRegion,firstRegion) == 0); assertTrue(comp.compare(lastRegion,lastRegion) == 0); assertTrue(comp.compare(rootRegion,firstRegion) < 0); assertTrue(comp.compare(rootRegion,lastRegion) < 0); assertTrue(comp.compare(firstRegion,lastRegion) < 0);
// Generation 1: daughters of firstRegion and lastRegion.
HRegionInfo firstRegiona=new HRegionInfo(htd.getTableName(),HConstants.EMPTY_START_ROW,Bytes.toBytes("aaa"),true); HRegionInfo firstRegionb=new HRegionInfo(htd.getTableName(),Bytes.toBytes("aaa"),Bytes.toBytes("bbb"),true); HRegionInfo lastRegiona=new HRegionInfo(htd.getTableName(),Bytes.toBytes("bbb"),Bytes.toBytes("ddd"),true); HRegionInfo lastRegionb=new HRegionInfo(htd.getTableName(),Bytes.toBytes("ddd"),HConstants.EMPTY_END_ROW,true); assertTrue(comp.compare(firstRegiona,firstRegiona) == 0); assertTrue(comp.compare(firstRegionb,firstRegionb) == 0); assertTrue(comp.compare(rootRegion,firstRegiona) < 0); assertTrue(comp.compare(rootRegion,firstRegionb) < 0); assertTrue(comp.compare(firstRegion,firstRegiona) < 0); assertTrue(comp.compare(firstRegion,firstRegionb) < 0); assertTrue(comp.compare(firstRegiona,firstRegionb) < 0); assertTrue(comp.compare(lastRegiona,lastRegiona) == 0); assertTrue(comp.compare(lastRegionb,lastRegionb) == 0); assertTrue(comp.compare(rootRegion,lastRegiona) < 0); assertTrue(comp.compare(rootRegion,lastRegionb) < 0); assertTrue(comp.compare(lastRegion,lastRegiona) < 0); assertTrue(comp.compare(lastRegion,lastRegionb) < 0); assertTrue(comp.compare(lastRegiona,lastRegionb) < 0); assertTrue(comp.compare(firstRegiona,lastRegiona) < 0); assertTrue(comp.compare(firstRegiona,lastRegionb) < 0); assertTrue(comp.compare(firstRegionb,lastRegiona) < 0); assertTrue(comp.compare(firstRegionb,lastRegionb) < 0);
// Generation 2: daughters of lastRegiona still sort after their parent.
HRegionInfo lastRegionaa=new HRegionInfo(htd.getTableName(),Bytes.toBytes("bbb"),Bytes.toBytes("ccc"),false); HRegionInfo lastRegionab=new HRegionInfo(htd.getTableName(),Bytes.toBytes("ccc"),Bytes.toBytes("ddd"),false); assertTrue(comp.compare(lastRegiona,lastRegionaa) < 0); assertTrue(comp.compare(lastRegiona,lastRegionab) < 0); assertTrue(comp.compare(lastRegionaa,lastRegionab) < 0); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that when CatalogJanitor cleans up a split parent region, every one
 * of the parent's store files is moved into the archive directory and the
 * archived copies match the originals.
 */
@Test public void testArchiveOldRegion() throws Exception {
  String table = "table";
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);
  // Janitor under test plus a parent region fully covered by two daughters,
  // so cleanParent() is allowed to archive and remove it.
  CatalogJanitor janitor = new CatalogJanitor(server, services);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  Result parentMetaRow = createResult(parent, splita, splitb);
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  Path rootdir = services.getMasterFileSystem().getRootDir();
  // The archive-path resolution reads the root dir from the fs config.
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
      tabledir, htd.getColumnFamilies()[0].getName());
  LOG.debug("Table dir:" + tabledir);
  LOG.debug("Store dir:" + storedir);
  LOG.debug("Store archive dir:" + storeArchive);
  // Seed the parent's store with mock files and sanity-check we see exactly
  // the files we created, in order.
  FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
  FileStatus[] storeFiles = fs.listStatus(storedir);
  int index = 0;
  for (FileStatus file : storeFiles) {
    LOG.debug("Have store file:" + file.getPath());
    assertEquals("Got unexpected store file", mockFiles[index].getPath(), storeFiles[index].getPath());
    index++;
  }
  // Cleaning the parent must archive all of its store files.
  assertTrue(janitor.cleanParent(parent, parentMetaRow));
  LOG.debug("Finished cleanup of parent region");
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  // Fixed label: the first call logs the ORIGINAL store files (was
  // mislabeled "archived files" by copy/paste).
  logFiles("original files", storeFiles);
  logFiles("archived files", archivedStoreFiles);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
  // Cleanup of test state.
  FSUtils.delete(fs, rootdir, true);
  services.stop("Test finished");
  server.stop("Test finished");
  janitor.cancel(true);
}

Class: org.apache.hadoop.hbase.master.TestClusterStatusPublisher

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testMaxSend(){ ClusterStatusPublisher csp=new ClusterStatusPublisher(){ @Override protected List> getDeadServers( long since){ List> res=new ArrayList>(); switch ((int)EnvironmentEdgeManager.currentTime()) { case 2: res.add(new Pair(ServerName.valueOf("hn",10,10),1L)); break; case 1000: break; } return res; } } ; mee.setValue(2); for (int i=0; i < ClusterStatusPublisher.NB_SEND; i++) { Assert.assertEquals("i=" + i,1,csp.generateDeadServersListToSend().size()); } mee.setValue(1000); Assert.assertTrue(csp.generateDeadServersListToSend().isEmpty()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testOrder(){ ClusterStatusPublisher csp=new ClusterStatusPublisher(){ @Override protected List> getDeadServers( long since){ List> res=new ArrayList>(); for (int i=0; i < 25; i++) { res.add(new Pair(ServerName.valueOf("hn" + i,10,10),20L)); } return res; } } ; mee.setValue(3); List allSNS=csp.generateDeadServersListToSend(); Assert.assertEquals(10,ClusterStatusPublisher.MAX_SERVER_PER_MESSAGE); Assert.assertEquals(10,allSNS.size()); List nextMes=csp.generateDeadServersListToSend(); Assert.assertEquals(10,nextMes.size()); for ( ServerName sn : nextMes) { if (!allSNS.contains(sn)) { allSNS.add(sn); } } Assert.assertEquals(20,allSNS.size()); nextMes=csp.generateDeadServersListToSend(); Assert.assertEquals(10,nextMes.size()); for ( ServerName sn : nextMes) { if (!allSNS.contains(sn)) { allSNS.add(sn); } } Assert.assertEquals(25,allSNS.size()); nextMes=csp.generateDeadServersListToSend(); Assert.assertEquals(10,nextMes.size()); for ( ServerName sn : nextMes) { if (!allSNS.contains(sn)) { allSNS.add(sn); } } Assert.assertEquals(25,allSNS.size()); }

Class: org.apache.hadoop.hbase.master.TestDeadServer

InternalCallVerifier BooleanVerifier 
@Test public void testClean(){ DeadServer d=new DeadServer(); d.add(hostname123); d.cleanPreviousInstance(hostname12345); Assert.assertFalse(d.isEmpty()); d.cleanPreviousInstance(hostname1234); Assert.assertFalse(d.isEmpty()); d.cleanPreviousInstance(hostname123_2); Assert.assertTrue(d.isEmpty()); }

InternalCallVerifier BooleanVerifier 
@Test(timeout=15000) public void testCrashProcedureReplay(){ HMaster master=TEST_UTIL.getHBaseCluster().getMaster(); ProcedureExecutor pExecutor=master.getMasterProcedureExecutor(); ServerCrashProcedure proc=new ServerCrashProcedure(hostname123,false,false); ProcedureTestingUtility.submitAndWait(pExecutor,proc); assertFalse(master.getServerManager().getDeadServers().areDeadServersInProgress()); }

InternalCallVerifier EqualityVerifier 
@Test public void testSortExtract(){ ManualEnvironmentEdge mee=new ManualEnvironmentEdge(); EnvironmentEdgeManager.injectEdge(mee); mee.setValue(1); DeadServer d=new DeadServer(); d.add(hostname123); mee.incValue(1); d.add(hostname1234); mee.incValue(1); d.add(hostname12345); List> copy=d.copyDeadServersSince(2L); Assert.assertEquals(2,copy.size()); Assert.assertEquals(hostname1234,copy.get(0).getFirst()); Assert.assertEquals(new Long(2L),copy.get(0).getSecond()); Assert.assertEquals(hostname12345,copy.get(1).getFirst()); Assert.assertEquals(new Long(3L),copy.get(1).getSecond()); EnvironmentEdgeManager.reset(); }

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test public void testIsDead(){ DeadServer ds=new DeadServer(); ds.add(hostname123); ds.notifyServer(hostname123); assertTrue(ds.areDeadServersInProgress()); ds.finish(hostname123); assertFalse(ds.areDeadServersInProgress()); ds.add(hostname1234); ds.notifyServer(hostname1234); assertTrue(ds.areDeadServersInProgress()); ds.finish(hostname1234); assertFalse(ds.areDeadServersInProgress()); ds.add(hostname12345); ds.notifyServer(hostname12345); assertTrue(ds.areDeadServersInProgress()); ds.finish(hostname12345); assertFalse(ds.areDeadServersInProgress()); final ServerName deadServer=ServerName.valueOf("127.0.0.1",9090,112321L); assertFalse(ds.cleanPreviousInstance(deadServer)); ds.add(deadServer); assertTrue(ds.isDeadServer(deadServer)); Set deadServerNames=ds.copyServerNames(); for ( ServerName eachDeadServer : deadServerNames) { Assert.assertNotNull(ds.getTimeOfDeath(eachDeadServer)); } final ServerName deadServerHostComingAlive=ServerName.valueOf("127.0.0.1",9090,223341L); assertTrue(ds.cleanPreviousInstance(deadServerHostComingAlive)); assertFalse(ds.isDeadServer(deadServer)); assertFalse(ds.cleanPreviousInstance(deadServerHostComingAlive)); }

Class: org.apache.hadoop.hbase.master.TestGetLastFlushedSequenceId

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void test() throws IOException, InterruptedException { testUtil.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()).build()); Table table=testUtil.createTable(tableName,families); table.put(new Put(Bytes.toBytes("k")).addColumn(family,Bytes.toBytes("q"),Bytes.toBytes("v"))); MiniHBaseCluster cluster=testUtil.getMiniHBaseCluster(); List rsts=cluster.getRegionServerThreads(); Region region=null; for (int i=0; i < cluster.getRegionServerThreads().size(); i++) { HRegionServer hrs=rsts.get(i).getRegionServer(); for ( Region r : hrs.getOnlineRegions(tableName)) { region=r; break; } } assertNotNull(region); Thread.sleep(2000); RegionStoreSequenceIds ids=testUtil.getHBaseCluster().getMaster().getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); assertEquals(HConstants.NO_SEQNUM,ids.getLastFlushedSequenceId()); long storeSequenceId=ids.getStoreSequenceId(0).getSequenceId(); assertTrue(storeSequenceId > 0); testUtil.getHBaseAdmin().flush(tableName); Thread.sleep(2000); ids=testUtil.getHBaseCluster().getMaster().getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); assertTrue(ids.getLastFlushedSequenceId() + " > " + storeSequenceId,ids.getLastFlushedSequenceId() > storeSequenceId); assertEquals(ids.getLastFlushedSequenceId(),ids.getStoreSequenceId(0).getSequenceId()); table.close(); }

Class: org.apache.hadoop.hbase.master.TestHMasterCommandLine

InternalCallVerifier EqualityVerifier 
@Test public void testRun() throws Exception { HMasterCommandLine masterCommandLine=new HMasterCommandLine(HMaster.class); masterCommandLine.setConf(TESTING_UTIL.getConfiguration()); assertEquals(0,masterCommandLine.run(new String[]{"clear"})); }

Class: org.apache.hadoop.hbase.master.TestHMasterRPCException

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
@Test public void testRPCException() throws IOException, InterruptedException, KeeperException { ServerName sm=master.getServerName(); boolean fakeZNodeDelete=false; for (int i=0; i < 20; i++) { try { BlockingRpcChannel channel=rpcClient.createBlockingRpcChannel(sm,User.getCurrent(),0); MasterProtos.MasterService.BlockingInterface stub=MasterProtos.MasterService.newBlockingStub(channel); assertTrue(stub.isMasterRunning(null,IsMasterRunningRequest.getDefaultInstance()).getIsMasterRunning()); return; } catch ( ServiceException ex) { IOException ie=ProtobufUtil.getRemoteException(ex); assertTrue(ie.getMessage().startsWith("org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet")); LOG.info("Expected exception: ",ie); if (!fakeZNodeDelete) { testUtil.getZooKeeperWatcher().getRecoverableZooKeeper().delete(testUtil.getZooKeeperWatcher().getMasterAddressZNode(),-1); fakeZNodeDelete=true; } } Thread.sleep(1000); } }

Class: org.apache.hadoop.hbase.master.TestMaster

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test @SuppressWarnings("deprecation") public void testMasterOpsWhileSplitting() throws Exception { MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); HMaster m=cluster.getMaster(); try (Table ht=TEST_UTIL.createTable(TABLENAME,FAMILYNAME)){ assertTrue(m.assignmentManager.getTableStateManager().isTableState(TABLENAME,TableState.State.ENABLED)); TEST_UTIL.loadTable(ht,FAMILYNAME,false); } List> tableRegions=MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(),TABLENAME); LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions)); assertEquals(1,tableRegions.size()); assertArrayEquals(HConstants.EMPTY_START_ROW,tableRegions.get(0).getFirst().getStartKey()); assertArrayEquals(HConstants.EMPTY_END_ROW,tableRegions.get(0).getFirst().getEndKey()); LOG.info("Splitting table"); TEST_UTIL.getHBaseAdmin().split(TABLENAME); LOG.info("Waiting for split result to be about to open"); RegionStates regionStates=m.assignmentManager.getRegionStates(); while (regionStates.getRegionsOfTable(TABLENAME).size() <= 1) { Thread.sleep(100); } LOG.info("Making sure we can call getTableRegions while opening"); tableRegions=MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(),TABLENAME,false); LOG.info("Regions: " + Joiner.on(',').join(tableRegions)); assertEquals(3,tableRegions.size()); LOG.info("Making sure we can call getTableRegionClosest while opening"); Pair pair=m.getTableRegionForRow(TABLENAME,Bytes.toBytes("cde")); LOG.info("Result is: " + pair); Pair tableRegionFromName=MetaTableAccessor.getRegion(m.getConnection(),pair.getFirst().getRegionName()); assertEquals(tableRegionFromName.getFirst(),pair.getFirst()); }

Class: org.apache.hadoop.hbase.master.TestMasterFailover

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Simple test of master failover.
 *
 * Starts with three masters. Kills a backup master, verifies the active
 * master is unchanged. Then kills the active master and ensures the final
 * master becomes active and the cluster status reflects it.
 * @throws Exception
 */
@Test(timeout=240000) public void testSimpleMasterFailover() throws Exception { final int NUM_MASTERS=3; final int NUM_RS=3; HBaseTestingUtility TEST_UTIL=new HBaseTestingUtility(); TEST_UTIL.startMiniCluster(NUM_MASTERS,NUM_RS); MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); List masterThreads=cluster.getMasterThreads(); for ( MasterThread mt : masterThreads) { assertTrue(mt.isAlive()); }
// Locate the single active master among the three.
int numActive=0; int activeIndex=-1; ServerName activeName=null; HMaster active=null; for (int i=0; i < masterThreads.size(); i++) { if (masterThreads.get(i).getMaster().isActiveMaster()) { numActive++; activeIndex=i; active=masterThreads.get(activeIndex).getMaster(); activeName=active.getServerName(); } } assertEquals(1,numActive); assertEquals(NUM_MASTERS,masterThreads.size()); LOG.info("Active master " + activeName); assertNotNull(active); ClusterStatus status=active.getClusterStatus(); assertTrue(status.getMaster().equals(activeName)); assertEquals(2,status.getBackupMastersSize()); assertEquals(2,status.getBackupMasters().size());
// Stop one BACKUP master; the active master must not change.
int backupIndex=(activeIndex == 0 ? 1 : activeIndex - 1); HMaster master=cluster.getMaster(backupIndex); LOG.debug("\n\nStopping a backup master: " + master.getServerName() + "\n"); cluster.stopMaster(backupIndex,false); cluster.waitOnMaster(backupIndex); for (int i=0; i < masterThreads.size(); i++) { if (masterThreads.get(i).getMaster().isActiveMaster()) { assertTrue(activeName.equals(masterThreads.get(i).getMaster().getServerName())); activeIndex=i; active=masterThreads.get(activeIndex).getMaster(); } }
// NOTE(review): numActive is NOT recomputed by the loop above, so the next
// assertion re-checks the stale count from before the backup was stopped —
// consider recounting active masters here to make this check meaningful.
assertEquals(1,numActive); assertEquals(2,masterThreads.size()); int rsCount=masterThreads.get(activeIndex).getMaster().getClusterStatus().getServersSize(); LOG.info("Active master " + active.getServerName() + " managing "+ rsCount+ " regions servers"); assertEquals(4,rsCount); assertNotNull(active); status=active.getClusterStatus(); assertTrue(status.getMaster().equals(activeName)); assertEquals(1,status.getBackupMastersSize()); assertEquals(1,status.getBackupMasters().size());
// Stop the ACTIVE master; the last remaining master must take over.
LOG.debug("\n\nStopping the active master " + active.getServerName() + "\n"); cluster.stopMaster(activeIndex,false); cluster.waitOnMaster(activeIndex); assertTrue(cluster.waitForActiveAndReadyMaster()); LOG.debug("\n\nVerifying backup master is now active\n"); assertEquals(1,masterThreads.size()); active=masterThreads.get(0).getMaster(); assertNotNull(active); status=active.getClusterStatus(); ServerName mastername=status.getMaster(); assertTrue(mastername.equals(active.getServerName())); assertTrue(active.isActiveMaster()); assertEquals(0,status.getBackupMastersSize()); assertEquals(0,status.getBackupMasters().size()); int rss=status.getServersSize(); LOG.info("Active master " + mastername.getServerName() + " managing "+ rss+ " region servers"); assertEquals(4,rss); TEST_UTIL.shutdownMiniCluster(); }


APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that regions left in transient states (PENDING_OPEN, PENDING_CLOSE,
 * FAILED_OPEN with and without a server, FAILED_CLOSE) when the master dies
 * are all brought back online by the new master after failover.
 */
@Test(timeout=180000) public void testPendingOpenOrCloseWhenMasterFailover() throws Exception { final int NUM_MASTERS=1; final int NUM_RS=1; Configuration conf=HBaseConfiguration.create(); HBaseTestingUtility TEST_UTIL=new HBaseTestingUtility(conf); TEST_UTIL.startMiniCluster(NUM_MASTERS,NUM_RS); MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); log("Cluster started"); List masterThreads=cluster.getMasterThreads(); assertEquals(1,masterThreads.size()); assertTrue(cluster.waitForActiveAndReadyMaster()); HMaster master=masterThreads.get(0).getMaster(); assertTrue(master.isActiveMaster()); assertTrue(master.isInitialized());
// One served table ("onlineTable") plus an offline table whose region
// exists on disk and in meta but is never assigned.
Table onlineTable=TEST_UTIL.createTable(TableName.valueOf("onlineTable"),"family"); onlineTable.close(); HTableDescriptor offlineTable=new HTableDescriptor(TableName.valueOf(Bytes.toBytes("offlineTable"))); offlineTable.addFamily(new HColumnDescriptor(Bytes.toBytes("family"))); FileSystem filesystem=FileSystem.get(conf); Path rootdir=FSUtils.getRootDir(conf); FSTableDescriptors fstd=new FSTableDescriptors(conf,filesystem,rootdir); fstd.createTableDescriptor(offlineTable); HRegionInfo hriOffline=new HRegionInfo(offlineTable.getTableName(),null,null); createRegion(hriOffline,rootdir,conf,offlineTable); MetaTableAccessor.addRegionToMeta(master.getConnection(),hriOffline); log("Regions in hbase:meta and namespace have been created"); assertEquals(3,cluster.countServedRegions()); HRegionInfo hriOnline=null; try (RegionLocator locator=TEST_UTIL.getConnection().getRegionLocator(TableName.valueOf("onlineTable"))){ hriOnline=locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo(); } RegionStates regionStates=master.getAssignmentManager().getRegionStates(); RegionStateStore stateStore=master.getAssignmentManager().getRegionStateStore();
// Force the online region to PENDING_CLOSE and the offline one to
// PENDING_OPEN directly in the state store.
RegionState oldState=regionStates.getRegionState(hriOnline); RegionState newState=new RegionState(hriOnline,State.PENDING_CLOSE,oldState.getServerName()); stateStore.updateRegionState(HConstants.NO_SEQNUM,newState,oldState); oldState=new RegionState(hriOffline,State.OFFLINE); newState=new RegionState(hriOffline,State.PENDING_OPEN,newState.getServerName()); stateStore.updateRegionState(HConstants.NO_SEQNUM,newState,oldState);
// Three more regions stuck in FAILED_CLOSE, FAILED_OPEN (with a server),
// and FAILED_OPEN with a null server.
HRegionInfo failedClose=new HRegionInfo(offlineTable.getTableName(),null,null); createRegion(failedClose,rootdir,conf,offlineTable); MetaTableAccessor.addRegionToMeta(master.getConnection(),failedClose); oldState=new RegionState(failedClose,State.PENDING_CLOSE); newState=new RegionState(failedClose,State.FAILED_CLOSE,newState.getServerName()); stateStore.updateRegionState(HConstants.NO_SEQNUM,newState,oldState); HRegionInfo failedOpen=new HRegionInfo(offlineTable.getTableName(),null,null); createRegion(failedOpen,rootdir,conf,offlineTable); MetaTableAccessor.addRegionToMeta(master.getConnection(),failedOpen); oldState=new RegionState(failedOpen,State.PENDING_OPEN); newState=new RegionState(failedOpen,State.FAILED_OPEN,newState.getServerName()); stateStore.updateRegionState(HConstants.NO_SEQNUM,newState,oldState); HRegionInfo failedOpenNullServer=new HRegionInfo(offlineTable.getTableName(),null,null); LOG.info("Failed open NUll server " + failedOpenNullServer.getEncodedName()); createRegion(failedOpenNullServer,rootdir,conf,offlineTable); MetaTableAccessor.addRegionToMeta(master.getConnection(),failedOpenNullServer); oldState=new RegionState(failedOpenNullServer,State.OFFLINE); newState=new RegionState(failedOpenNullServer,State.FAILED_OPEN,null); stateStore.updateRegionState(HConstants.NO_SEQNUM,newState,oldState);
// Fail over: abort the master and start a fresh one.
log("Aborting master"); cluster.abortMaster(0); cluster.waitOnMaster(0); log("Master has aborted"); log("Starting up a new master"); master=cluster.startMaster().getMaster(); log("Waiting for master to be ready"); cluster.waitForActiveAndReadyMaster(); log("Master is ready");
// The new master must recover every one of the stuck regions to ONLINE.
TEST_UTIL.waitUntilNoRegionsInTransition(60000); regionStates=master.getAssignmentManager().getRegionStates(); assertTrue(regionStates.isRegionOnline(hriOffline)); assertTrue(regionStates.isRegionOnline(hriOnline)); assertTrue(regionStates.isRegionOnline(failedClose)); assertTrue(regionStates.isRegionOnline(failedOpenNullServer)); assertTrue(regionStates.isRegionOnline(failedOpen)); log("Done with verification, shutting down cluster"); TEST_UTIL.shutdownMiniCluster(); }

InternalCallVerifier EqualityVerifier 
/**
 * Tests master failover while hbase:meta is in transition: the new master
 * must recover meta whether it was cleanly OPEN, marked PENDING_OPEN while
 * actually closed, or marked PENDING_CLOSE while actually closed.
 */
@Test(timeout=180000) public void testMetaInTransitionWhenMasterFailover() throws Exception { final int NUM_MASTERS=1; final int NUM_RS=1; HBaseTestingUtility TEST_UTIL=new HBaseTestingUtility(); TEST_UTIL.startMiniCluster(NUM_MASTERS,NUM_RS); MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); log("Cluster started");
// Move meta onto the (only) region server so killing the master cannot
// take meta down with it.
log("Moving meta off the master"); HMaster activeMaster=cluster.getMaster(); HRegionServer rs=cluster.getRegionServer(0); ServerName metaServerName=cluster.getLiveRegionServerThreads().get(0).getRegionServer().getServerName(); activeMaster.move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),Bytes.toBytes(metaServerName.getServerName())); TEST_UTIL.waitUntilNoRegionsInTransition(60000); assertEquals("Meta should be assigned on expected regionserver",metaServerName,activeMaster.getMetaTableLocator().getMetaRegionLocation(activeMaster.getZooKeeper()));
// Failover 1: meta is genuinely OPEN on the RS; new master must see that.
log("Aborting master"); activeMaster.abort("test-kill"); cluster.waitForMasterToStop(activeMaster.getServerName(),30000); log("Master has aborted"); RegionState metaState=MetaTableLocator.getMetaRegionState(rs.getZooKeeper()); assertEquals("hbase:meta should be onlined on RS",metaState.getServerName(),rs.getServerName()); assertEquals("hbase:meta should be onlined on RS",metaState.getState(),State.OPEN); log("Starting up a new master"); activeMaster=cluster.startMaster().getMaster(); log("Waiting for master to be ready"); cluster.waitForActiveAndReadyMaster(); log("Master is ready"); metaState=MetaTableLocator.getMetaRegionState(activeMaster.getZooKeeper()); assertEquals("hbase:meta should be onlined on RS",metaState.getServerName(),rs.getServerName()); assertEquals("hbase:meta should be onlined on RS",metaState.getState(),State.OPEN);
// Failover 2: mark meta PENDING_OPEN in ZK but actually close it on the
// RS; the new master must re-assign it.
MetaTableLocator.setMetaLocation(activeMaster.getZooKeeper(),rs.getServerName(),State.PENDING_OPEN); Region meta=rs.getFromOnlineRegions(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); rs.removeFromOnlineRegions(meta,null); ((HRegion)meta).close(); log("Aborting master"); activeMaster.abort("test-kill"); cluster.waitForMasterToStop(activeMaster.getServerName(),30000); log("Master has aborted"); log("Starting up a new master"); activeMaster=cluster.startMaster().getMaster(); log("Waiting for master to be ready"); cluster.waitForActiveAndReadyMaster(); log("Master is ready"); TEST_UTIL.waitUntilNoRegionsInTransition(60000); log("Meta was assigned"); metaState=MetaTableLocator.getMetaRegionState(activeMaster.getZooKeeper()); assertEquals("hbase:meta should be onlined on RS",metaState.getServerName(),rs.getServerName()); assertEquals("hbase:meta should be onlined on RS",metaState.getState(),State.OPEN);
// Failover 3: mark meta PENDING_CLOSE, kill the master, close meta via the
// RS RPC, and verify the next master still recovers it.
MetaTableLocator.setMetaLocation(activeMaster.getZooKeeper(),rs.getServerName(),State.PENDING_CLOSE); log("Aborting master"); activeMaster.abort("test-kill"); cluster.waitForMasterToStop(activeMaster.getServerName(),30000); log("Master has aborted"); rs.getRSRpcServices().closeRegion(null,RequestConverter.buildCloseRegionRequest(rs.getServerName(),HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())); log("Starting up a new master"); activeMaster=cluster.startMaster().getMaster(); log("Waiting for master to be ready"); cluster.waitForActiveAndReadyMaster(); log("Master is ready"); TEST_UTIL.waitUntilNoRegionsInTransition(60000); log("Meta was assigned"); TEST_UTIL.shutdownMiniCluster(); }

Class: org.apache.hadoop.hbase.master.TestMasterFailoverBalancerPersistence

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** * Test that if the master fails, the load balancer maintains its * state (running or not) when the next master takes over * @throws Exception */ @Test(timeout=240000) public void testMasterFailoverBalancerPersistence() throws Exception { final int NUM_MASTERS=3; final int NUM_RS=1; HBaseTestingUtility TEST_UTIL=new HBaseTestingUtility(); TEST_UTIL.startMiniCluster(NUM_MASTERS,NUM_RS); MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); assertTrue(cluster.waitForActiveAndReadyMaster()); HMaster active=cluster.getMaster(); ClusterStatus clusterStatus=active.getClusterStatus(); assertTrue(clusterStatus.isBalancerOn()); active=killActiveAndWaitForNewActive(cluster); clusterStatus=active.getClusterStatus(); assertTrue(clusterStatus.isBalancerOn()); active.balanceSwitch(false); active=killActiveAndWaitForNewActive(cluster); clusterStatus=active.getClusterStatus(); assertFalse(clusterStatus.isBalancerOn()); TEST_UTIL.shutdownMiniCluster(); }

Class: org.apache.hadoop.hbase.master.TestMasterFileSystem

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Verifies that the master file system's URI is set properly: the root dir
 * derived from the master's configuration must equal the root dir derived
 * from the filesystem's own configuration.
 */
@Test
public void testFsUriSetProperly() throws Exception {
  HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  MasterFileSystem masterFs = master.getMasterFileSystem();
  // Root dir as seen through the master's configuration ...
  Path masterRoot = FSUtils.getRootDir(masterFs.conf);
  // ... and as seen through the filesystem's own configuration.
  Path rootDir = FSUtils.getRootDir(masterFs.getFileSystem().getConf());
  LOG.debug("from fs uri:" + FileSystem.getDefaultUri(masterFs.getFileSystem().getConf()));
  LOG.debug("from configuration uri:" + FileSystem.getDefaultUri(masterFs.conf));
  assertEquals(masterRoot, rootDir);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier 
/**
 * Checks that recovering-region znodes left behind by previously failed
 * servers are purged from ZK by the master file system, while znodes for
 * servers still in recovery are kept.  Only meaningful when distributed log
 * replay is enabled; otherwise the test returns immediately.
 */
@Test
public void testRemoveStaleRecoveringRegionsDuringMasterInitialization() throws Exception {
  // This test is only relevant in distributed log replay mode.
  if (!UTIL.getConfiguration().getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false)) {
    return;
  }
  LOG.info("Starting testRemoveStaleRecoveringRegionsDuringMasterInitialization");
  HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  MasterFileSystem fs = master.getMasterFileSystem();

  String failedRegion = "failedRegoin1";
  String staleRegion = "staleRegion";
  ServerName inRecoveryServerName = ServerName.valueOf("mgr,1,1");
  ServerName previouslyFailedServerName = ServerName.valueOf("previous,1,1");
  String walPath = "/hbase/data/.logs/" + inRecoveryServerName.getServerName()
      + "-splitting/test";

  // Simulate a server in recovery: an owned split-log task plus a
  // recovering-region entry naming the in-recovery server, and a stale
  // recovering-region entry with no owner.
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, walPath),
      new SplitLogTask.Owned(inRecoveryServerName, fs.getLogRecoveryMode()).toByteArray(),
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  String staleRegionPath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, staleRegion);
  ZKUtil.createWithParents(zkw, staleRegionPath);
  String inRecoveringRegionPath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, failedRegion);
  inRecoveringRegionPath =
      ZKUtil.joinZNode(inRecoveringRegionPath, inRecoveryServerName.getServerName());
  ZKUtil.createWithParents(zkw, inRecoveringRegionPath);

  // Purge stale entries on behalf of the previously failed server.
  Set servers = new HashSet();
  servers.add(previouslyFailedServerName);
  fs.removeStaleRecoveringRegionsFromZK(servers);

  // The stale node must be gone; the in-recovery node must survive.
  assertFalse(ZKUtil.checkExists(zkw, staleRegionPath) != -1);
  assertTrue(ZKUtil.checkExists(zkw, inRecoveringRegionPath) != -1);

  // Clean up ZK state created by this test.
  ZKUtil.deleteChildrenRecursively(zkw, zkw.recoveringRegionsZNode);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.splitLogZNode);
  zkw.close();
}

Class: org.apache.hadoop.hbase.master.TestMasterMetricsWrapper

InternalCallVerifier EqualityVerifier 
/**
 * Sanity-checks that MetricsMasterWrapperImpl mirrors the live master's
 * values, then stops one region server and verifies the online/dead server
 * counts are updated accordingly.
 */
@Test(timeout = 30000)
public void testInfo() {
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master);

  // The wrapper must mirror the master's own accessors.
  assertEquals(master.getSplitPlanCount(), info.getSplitPlanCount(), 0);
  assertEquals(master.getMergePlanCount(), info.getMergePlanCount(), 0);
  assertEquals(master.getAverageLoad(), info.getAverageLoad(), 0);
  assertEquals(master.getClusterId(), info.getClusterId());
  assertEquals(master.getMasterActiveTime(), info.getActiveTime());
  assertEquals(master.getMasterStartTime(), info.getStartTime());
  assertEquals(master.getMasterCoprocessors().length, info.getCoprocessors().length);
  assertEquals(master.getServerManager().getOnlineServersList().size(),
      info.getNumRegionServers());
  assertEquals(5, info.getNumRegionServers());

  String zkServers = info.getZookeeperQuorum();
  assertEquals(zkServers.split(",").length, TEST_UTIL.getZkCluster().getZooKeeperServerNum());

  // Take one region server down and wait until the master notices.
  final int index = 3;
  LOG.info("Stopping " + TEST_UTIL.getMiniHBaseCluster().getRegionServer(index));
  TEST_UTIL.getMiniHBaseCluster().stopRegionServer(index, false);
  TEST_UTIL.getMiniHBaseCluster().waitOnRegionServer(index);
  while (TEST_UTIL.getHBaseCluster().getMaster().getServerManager()
      .getOnlineServers().size() != 4) {
    Threads.sleep(10);
  }

  // The wrapper must reflect 4 online, 1 dead, and a WAL left behind.
  assertEquals(4, info.getNumRegionServers());
  assertEquals(1, info.getNumDeadRegionServers());
  assertEquals(1, info.getNumWALFiles());
}

Class: org.apache.hadoop.hbase.master.TestMasterNoCluster

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Verifies that a region-server entry left in ZK by a dead server is NOT
// pulled into the master's online set during startup: the RS tracker is
// spied to report both a live and a dead server, but only the live one is
// recorded with the ServerManager; after init the dead server must be offline.
@Test public void testNotPullingDeadRegionServerFromZK()
    throws IOException, KeeperException, InterruptedException {
  final Configuration conf=TESTUTIL.getConfiguration();
  // Same host/port; only the start code differs (101 = live, 100 = dead).
  final ServerName newServer=ServerName.valueOf("test.sample",1,101);
  final ServerName deadServer=ServerName.valueOf("test.sample",1,100);
  final MockRegionServer rs0=new MockRegionServer(conf,newServer);
  CoordinatedStateManager cp=
      CoordinatedStateManagerFactory.getCoordinatedStateManager(TESTUTIL.getConfiguration());
  HMaster master=new HMaster(conf,cp){
    // Skip real meta assignment; not needed for this test.
    @Override void assignMeta( MonitoredTask status, Set previouslyFailedMeatRSs, int replicaId){
    }
    // Skip cluster schema service initialization.
    @Override void initClusterSchemaService() throws IOException, InterruptedException {
    }
    @Override void initializeZKBasedSystemTrackers() throws IOException, InterruptedException,
        KeeperException, CoordinatedStateException {
      super.initializeZKBasedSystemTrackers();
      // Only the live server is registered with the ServerManager ...
      serverManager.recordNewServerWithLock(newServer,ServerLoad.EMPTY_SERVERLOAD);
      // ... but the spied tracker claims BOTH servers are online in ZK.
      List onlineServers=new ArrayList();
      onlineServers.add(deadServer);
      onlineServers.add(newServer);
      regionServerTracker=Mockito.spy(regionServerTracker);
      Mockito.doReturn(onlineServers).when(regionServerTracker).getOnlineServers();
    }
    // Hand out a mocked connection wired to the mock region server.
    @Override public ClusterConnection getConnection(){
      try {
        return HConnectionTestingUtility.getMockedConnectionAndDecorate(
            TESTUTIL.getConfiguration(),rs0,rs0,rs0.getServerName(),
            HRegionInfo.FIRST_META_REGIONINFO);
      } catch ( IOException e) {
        // Mocked connection creation is best-effort in this stub.
        return null;
      }
    }
  };
  master.start();
  try {
    while (!master.isInitialized()) Threads.sleep(10);
    LOG.info("Master is initialized");
    // The dead server listed in ZK must not have been promoted to online.
    assertFalse("The dead server should not be pulled in",
        master.serverManager.isServerOnline(deadServer));
  } finally {
    master.stopMaster();
    master.join();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Test master failover.
 * Start up three fake regionservers and a master.
 * @throws IOException
 * @throws KeeperException
 * @throws InterruptedException
 */
@Test public void testFailover()
    throws IOException, KeeperException, InterruptedException, ServiceException {
  final long now=System.currentTimeMillis();
  // Three fake region servers sharing the same start code (now).
  final ServerName sn0=ServerName.valueOf("0.example.org",0,now);
  final ServerName sn1=ServerName.valueOf("1.example.org",1,now);
  final ServerName sn2=ServerName.valueOf("2.example.org",2,now);
  final ServerName[] sns=new ServerName[]{sn0,sn1,sn2};
  final Configuration conf=TESTUTIL.getConfiguration();
  final MockRegionServer rs0=new MockRegionServer(conf,sn0);
  final MockRegionServer rs1=new MockRegionServer(conf,sn1);
  final MockRegionServer rs2=new MockRegionServer(conf,sn2);
  // Pretend meta is already deployed (OPEN) on rs0.
  MetaTableLocator.setMetaLocation(rs0.getZooKeeper(),rs0.getServerName(),
      RegionState.State.OPEN);
  // Three regions of table 't', all reported as hosted on rs2; rs1 returns
  // these rows when the meta region is scanned.
  final TableName tableName=TableName.valueOf("t");
  Result[] results=new Result[]{
      MetaMockingUtil.getMetaTableRowResult(
          new HRegionInfo(tableName,HConstants.EMPTY_START_ROW,HBaseTestingUtility.KEYS[1]),
          rs2.getServerName()),
      MetaMockingUtil.getMetaTableRowResult(
          new HRegionInfo(tableName,HBaseTestingUtility.KEYS[1],HBaseTestingUtility.KEYS[2]),
          rs2.getServerName()),
      MetaMockingUtil.getMetaTableRowResult(
          new HRegionInfo(tableName,HBaseTestingUtility.KEYS[2],HConstants.EMPTY_END_ROW),
          rs2.getServerName())};
  rs1.setNextResults(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(),results);
  CoordinatedStateManager cp=
      CoordinatedStateManagerFactory.getCoordinatedStateManager(TESTUTIL.getConfiguration());
  HMaster master=new HMaster(conf,cp){
    // Map ports 0..2 to the fake servers' addresses; delegate anything else.
    InetAddress getRemoteInetAddress( final int port, final long serverStartCode)
        throws UnknownHostException {
      if (port > sns.length) {
        return super.getRemoteInetAddress(port,serverStartCode);
      }
      ServerName sn=sns[port];
      return InetAddress.getByAddress(sn.getHostname(),
          new byte[]{10,0,0,(byte)sn.getPort()});
    }
    // Skip cluster schema service initialization.
    @Override void initClusterSchemaService() throws IOException,
        InterruptedException {
    }
    // Spy the ServerManager so region-close RPCs appear to succeed.
    @Override ServerManager createServerManager( Server master, MasterServices services)
        throws IOException {
      ServerManager sm=super.createServerManager(master,services);
      ServerManager spy=Mockito.spy(sm);
      Mockito.doReturn(true).when(spy).sendRegionClose((ServerName)Mockito.any(),
          (HRegionInfo)Mockito.any(),(ServerName)Mockito.any());
      return spy;
    }
    // Hand out a mocked connection wired to rs0.
    @Override public ClusterConnection getConnection(){
      try {
        return HConnectionTestingUtility.getMockedConnectionAndDecorate(
            TESTUTIL.getConfiguration(),rs0,rs0,rs0.getServerName(),
            HRegionInfo.FIRST_META_REGIONINFO);
      } catch ( IOException e) {
        // Mocked connection creation is best-effort in this stub.
        return null;
      }
    }
  };
  master.start();
  try {
    // Once the services are up, report each fake region server in so the
    // master can complete its initialization.
    while (!master.serviceStarted) Threads.sleep(10);
    for (int i=0; i < sns.length; i++) {
      RegionServerReportRequest.Builder request=RegionServerReportRequest.newBuilder();
      ;
      ServerName sn=ServerName.parseVersionedServerName(sns[i].getVersionedBytes());
      request.setServer(ProtobufUtil.toServerName(sn));
      request.setLoad(ServerLoad.EMPTY_SERVERLOAD.obtainServerLoadPB());
      master.getMasterRpcServices().regionServerReport(null,request.build());
    }
    // The master must reach the initialized state after the reports.
    while (!master.isInitialized()) {
      Threads.sleep(100);
    }
    assertTrue(master.isInitialized());
  } finally {
    rs0.stop("Test is done");
    rs1.stop("Test is done");
    rs2.stop("Test is done");
    master.stopMaster();
    master.join();
  }
}

Class: org.apache.hadoop.hbase.master.TestMasterRestartAfterDisablingTable

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() throws Exception { final int NUM_MASTERS=2; final int NUM_RS=1; final int NUM_REGIONS_TO_CREATE=4; log("Starting cluster"); Configuration conf=HBaseConfiguration.create(); HBaseTestingUtility TEST_UTIL=new HBaseTestingUtility(conf); TEST_UTIL.startMiniCluster(NUM_MASTERS,NUM_RS); MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); log("Waiting for active/ready master"); cluster.waitForActiveAndReadyMaster(); TableName table=TableName.valueOf("tableRestart"); byte[] family=Bytes.toBytes("family"); log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions"); Table ht=TEST_UTIL.createMultiRegionTable(table,family,NUM_REGIONS_TO_CREATE); int numRegions=-1; try (RegionLocator r=TEST_UTIL.getConnection().getRegionLocator(table)){ numRegions=r.getStartKeys().length; } numRegions+=1; log("Waiting for no more RIT\n"); TEST_UTIL.waitUntilNoRegionsInTransition(60000); log("Disabling table\n"); TEST_UTIL.getHBaseAdmin().disableTable(table); NavigableSet regions=HBaseTestingUtility.getAllOnlineRegions(cluster); assertEquals("The number of regions for the table tableRestart should be 0 and only" + "the catalog and namespace tables should be present.",2,regions.size()); List masterThreads=cluster.getMasterThreads(); MasterThread activeMaster=null; if (masterThreads.get(0).getMaster().isActiveMaster()) { activeMaster=masterThreads.get(0); } else { activeMaster=masterThreads.get(1); } activeMaster.getMaster().stop("stopping the active master so that the backup can become active"); cluster.hbaseCluster.waitOnMaster(activeMaster); cluster.waitForActiveAndReadyMaster(); assertTrue("The table should not be in enabled state",cluster.getMaster().getAssignmentManager().getTableStateManager().isTableState(TableName.valueOf("tableRestart"),TableState.State.DISABLED,TableState.State.DISABLING)); log("Enabling table\n"); Admin admin=TEST_UTIL.getHBaseAdmin(); admin.enableTable(table); admin.close(); log("Waiting 
for no more RIT\n"); TEST_UTIL.waitUntilNoRegionsInTransition(60000); log("Verifying there are " + numRegions + " assigned on cluster\n"); regions=HBaseTestingUtility.getAllOnlineRegions(cluster); assertEquals("The assigned regions were not onlined after master" + " switch except for the catalog and namespace tables.",6,regions.size()); assertTrue("The table should be in enabled state",cluster.getMaster().getAssignmentManager().getTableStateManager().isTableState(TableName.valueOf("tableRestart"),TableState.State.ENABLED)); ht.close(); TEST_UTIL.shutdownMiniCluster(); }

Class: org.apache.hadoop.hbase.master.TestMasterShutdown

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Simple test of shutdown.
 * <p>
 * Starts with three masters.  Tells the active master to shutdown the cluster.
 * Verifies that all masters are properly shutdown.
 * @throws Exception
 */
@Test(timeout = 120000)
public void testMasterShutdown() throws Exception {
  final int numMasters = 3;
  final int numRegionServers = 3;
  Configuration conf = HBaseConfiguration.create();

  // Bring up a cluster with three masters; every master thread must be alive.
  HBaseTestingUtility htu = new HBaseTestingUtility(conf);
  htu.startMiniCluster(numMasters, numRegionServers);
  MiniHBaseCluster cluster = htu.getHBaseCluster();
  List masterThreads = cluster.getMasterThreads();
  for (MasterThread mt : masterThreads) {
    assertTrue(mt.isAlive());
  }

  // Locate the active master; exactly one must exist.
  HMaster active = null;
  for (int i = 0; i < masterThreads.size(); i++) {
    if (masterThreads.get(i).getMaster().isActiveMaster()) {
      active = masterThreads.get(i).getMaster();
      break;
    }
  }
  assertNotNull(active);

  // The remaining two masters are backups.
  ClusterStatus status = active.getClusterStatus();
  assertEquals(2, status.getBackupMastersSize());
  assertEquals(2, status.getBackupMasters().size());

  // Ask the active master to shut the cluster down, then wait for every
  // master thread to exit (highest index first).
  active.shutdown();
  for (int i = numMasters - 1; i >= 0; --i) {
    cluster.waitOnMaster(i);
  }
  assertEquals(0, masterThreads.size());

  htu.shutdownMiniCluster();
}


InternalCallVerifier EqualityVerifier 
/**
 * Verifies a master that has no region servers yet can still be shut down
 * cleanly: a shutdown request is issued concurrently while the master is
 * still waiting for region servers to check in.
 * <p>
 * Fix: the shutdown thread previously swallowed every exception with an
 * empty catch block, so a failed shutdown request surfaced only as an
 * opaque test timeout; the exception is now logged.
 */
@Test(timeout = 60000)
public void testMasterShutdownBeforeStartingAnyRegionServer() throws Exception {
  final int NUM_MASTERS = 1;
  final int NUM_RS = 0;

  // Fast-failing client settings so shutdown is not held up by RPC retries;
  // a single region server is enough for the master to proceed.
  Configuration conf = HBaseConfiguration.create();
  conf.setInt("hbase.ipc.client.failed.servers.expiry", 200);
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);

  // Start DFS and ZK but no region servers.
  final HBaseTestingUtility util = new HBaseTestingUtility(conf);
  util.startMiniDFSCluster(3);
  util.startMiniZKCluster();
  util.createRootDir();
  final LocalHBaseCluster cluster = new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS,
      HMaster.class, MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
  final int MASTER_INDEX = 0;
  final MasterThread master = cluster.getMasters().get(MASTER_INDEX);
  master.start();
  LOG.info("Called master start on " + master.getName());

  // Concurrently ask the (still-initializing) master to shut down.
  Thread shutdownThread = new Thread() {
    public void run() {
      LOG.info("Before call to shutdown master");
      try {
        try (Connection connection =
            ConnectionFactory.createConnection(util.getConfiguration())) {
          try (Admin admin = connection.getAdmin()) {
            admin.shutdown();
          }
        }
        LOG.info("After call to shutdown master");
        cluster.waitOnMaster(MASTER_INDEX);
      } catch (Exception e) {
        // Log rather than swallow: otherwise a failed shutdown request only
        // shows up as a test timeout with no diagnostic.
        LOG.error("Error shutting down master", e);
      }
    }
  };
  shutdownThread.start();
  LOG.info("Called master join on " + master.getName());
  master.join();
  shutdownThread.join();

  // The master thread list must be empty once shutdown completed.
  List masterThreads = cluster.getMasters();
  assertEquals(0, masterThreads.size());

  util.shutdownMiniZKCluster();
  util.shutdownMiniDFSCluster();
  util.cleanupTestDir();
}

Class: org.apache.hadoop.hbase.master.TestMetaShutdownHandler

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This test will test the expire handling of a meta-carrying
 * region server.
 * After HBaseMiniCluster is up, we will delete the ephemeral
 * node of the meta-carrying region server, which will trigger
 * the expire of this region server on the master.
 * On the other hand, we will slow down the abort process on
 * the region server so that it is still up during the master SSH.
 * We will check that the master SSH is still successfully done.
 */
@Test(timeout=180000) public void testExpireMetaRegionServer() throws Exception {
  MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster();
  HMaster master=cluster.getMaster();
  RegionStates regionStates=master.getAssignmentManager().getRegionStates();
  ServerName metaServerName=
      regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO);
  // Make sure meta sits on an ordinary region server (not the master, and
  // consistent with the cluster's view) before expiring anything.
  if (master.getServerName().equals(metaServerName) || metaServerName == null
      || !metaServerName.equals(cluster.getServerHoldingMeta())) {
    metaServerName=
        cluster.getLiveRegionServerThreads().get(0).getRegionServer().getServerName();
    master.move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
        Bytes.toBytes(metaServerName.getServerName()));
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  }
  RegionState metaState=MetaTableLocator.getMetaRegionState(master.getZooKeeper());
  assertEquals("Meta should be not in transition",
      metaState.getState(),RegionState.State.OPEN);
  assertNotEquals("Meta should be moved off master",
      metaServerName,master.getServerName());
  // Delete the server's ephemeral znode: the master treats it as expired.
  String rsEphemeralNodePath=
      ZKUtil.joinZNode(master.getZooKeeper().rsZNode,metaServerName.toString());
  ZKUtil.deleteNode(master.getZooKeeper(),rsEphemeralNodePath);
  // Wait until server-shutdown handling for the expired server is complete.
  final ServerManager serverManager=master.getServerManager();
  final ServerName priorMetaServerName=metaServerName;
  TEST_UTIL.waitFor(120000,200,new Waiter.Predicate(){
    @Override public boolean evaluate() throws Exception {
      return !serverManager.isServerOnline(priorMetaServerName)
          && !serverManager.areDeadServersInProgress();
    }
  });
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  // Meta must be re-assigned: online, OPEN, consistent between ZK and the
  // region states, and on a different server than before.
  assertTrue("Meta should be assigned",
      regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO));
  metaState=MetaTableLocator.getMetaRegionState(master.getZooKeeper());
  assertEquals("Meta should be not in transition",
      metaState.getState(),RegionState.State.OPEN);
  assertEquals("Meta should be assigned",metaState.getServerName(),
      regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO));
  assertNotEquals("Meta should be assigned on a different server",
      metaState.getServerName(),metaServerName);
}

Class: org.apache.hadoop.hbase.master.TestMetricsMasterProcSourceImpl

InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier 
/**
 * The factory must hand back a MetricsMasterProcSourceImpl and behave as a
 * singleton across repeated lookups.
 */
@Test
public void testGetInstance() throws Exception {
  MetricsMasterProcSourceFactory factory =
      CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class);
  MetricsMasterProcSource source = factory.create(null);
  // The concrete implementation class must be the metrics2-based one.
  assertTrue(source instanceof MetricsMasterProcSourceImpl);
  // Repeated lookups must return the same factory instance.
  assertSame(factory,
      CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class));
}

Class: org.apache.hadoop.hbase.master.TestMetricsMasterSourceImpl

InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier 
/**
 * The factory must hand back a MetricsMasterSourceImpl and behave as a
 * singleton across repeated lookups.
 */
@Test
public void testGetInstance() throws Exception {
  MetricsMasterSourceFactory factory =
      CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class);
  MetricsMasterSource source = factory.create(null);
  // The concrete implementation class must be the metrics2-based one.
  assertTrue(source instanceof MetricsMasterSourceImpl);
  // Repeated lookups must return the same factory instance.
  assertSame(factory,
      CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class));
}

Class: org.apache.hadoop.hbase.master.TestRegionPlacement

APIUtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier IgnoredMethod 
// Exercises favored-node region placement end to end: creates a table, shuffles
// the assignment plan between favored positions, verifies expected region
// movement, then kills a server and re-verifies placement.
@Ignore("Test for unfinished feature") @Test public void testRegionPlacement() throws Exception {
  String tableStr="testRegionAssignment";
  TableName table=TableName.valueOf(tableStr);
  // Create the table and wait until every region lands on its primary
  // favored node.
  createTable(table,REGION_NUM);
  TEST_UTIL.waitTableAvailable(table);
  verifyRegionOnPrimaryRS(REGION_NUM);
  FavoredNodesPlan currentPlan=rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan();
  verifyRegionServerUpdated(currentPlan);
  // Shuffle secondary/tertiary: expect 0 region moves.
  FavoredNodesPlan shuffledPlan=this.shuffleAssignmentPlan(currentPlan,
      FavoredNodesPlan.Position.SECONDARY,FavoredNodesPlan.Position.TERTIARY);
  rp.updateAssignmentPlan(shuffledPlan);
  verifyRegionAssignment(shuffledPlan,0,REGION_NUM);
  // Shuffle primary/secondary: expect every region (REGION_NUM) to move.
  shuffledPlan=this.shuffleAssignmentPlan(currentPlan,
      FavoredNodesPlan.Position.PRIMARY,FavoredNodesPlan.Position.SECONDARY);
  rp.updateAssignmentPlan(shuffledPlan);
  verifyRegionAssignment(shuffledPlan,REGION_NUM,REGION_NUM);
  // NOTE(review): this local 'rp' shadows the outer 'rp' used above — it looks
  // intentional (fresh maintainer scoped to this table for verification), but
  // worth confirming.
  RegionPlacementMaintainer rp=new RegionPlacementMaintainer(TEST_UTIL.getConfiguration());
  rp.setTargetTableName(new String[]{tableStr});
  List reports=rp.verifyRegionPlacement(false);
  AssignmentVerificationReport report=reports.get(0);
  // All regions favored-assigned; everything counted on the primary position.
  assertTrue(report.getRegionsWithoutValidFavoredNodes().size() == 0);
  assertTrue(report.getNonFavoredAssignedRegions().size() == 0);
  assertTrue(report.getTotalFavoredAssignments() >= REGION_NUM);
  assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.PRIMARY) != 0);
  assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) == 0);
  assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY) == 0);
  assertTrue(report.getUnassignedRegions().size() == 0);
  // Kill a random server: regions must fail over to other favored nodes
  // (some now on secondary/tertiary), with the position counts still summing
  // to REGION_NUM.
  killRandomServerAndVerifyAssignment();
  reports=rp.verifyRegionPlacement(false);
  report=reports.get(0);
  assertTrue(report.getRegionsWithoutValidFavoredNodes().size() == 0);
  assertTrue(report.getNonFavoredAssignedRegions().size() == 0);
  assertTrue(report.getTotalFavoredAssignments() >= REGION_NUM);
  assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.PRIMARY) > 0);
  assertTrue("secondary " + report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY)
      + " tertiary "+ report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY),
      (report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) > 0
          || report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY) > 0));
  assertTrue((report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.PRIMARY)
      + report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY)
      + report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY)) == REGION_NUM);
  RegionPlacementMaintainer.printAssignmentPlan(currentPlan);
}

Class: org.apache.hadoop.hbase.master.TestRegionPlacement2

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Round-robin assignment must produce and maintain three favored nodes for a
// region: placement starts on the primary, falls to secondary/tertiary when
// the primary is removed from the candidates, returns to the original set when
// it is added back, and gets a completely fresh favored set once every
// favored node is removed.
@Test public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException {
  LoadBalancer balancer=LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  // Candidate servers: every region server in the mini cluster.
  List servers=new ArrayList();
  for (int i=0; i < SLAVES; i++) {
    ServerName server=TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  List regions=new ArrayList(1);
  HRegionInfo region=new HRegionInfo(TableName.valueOf("foobar"));
  regions.add(region);
  // Initial assignment: the region lands on its primary favored node.
  Map> assignmentMap=balancer.roundRobinAssignment(regions,servers);
  Set serverBefore=assignmentMap.keySet();
  List favoredNodesBefore=((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesBefore.size() == 3);
  assertTrue(ServerName.isSameHostnameAndPort(serverBefore.iterator().next(),
      favoredNodesBefore.get(PRIMARY)));
  // Remove the primary from the candidates: the assignment moves to the
  // secondary or tertiary, while the favored-node list itself is unchanged.
  List removedServers=removeMatchingServers(serverBefore,servers);
  assignmentMap=balancer.roundRobinAssignment(regions,servers);
  List favoredNodesAfter=((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesAfter.size() == 3);
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  Set serverAfter=assignmentMap.keySet();
  assertTrue(ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
      favoredNodesBefore.get(SECONDARY))
      || ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
          favoredNodesBefore.get(TERTIARY)));
  // Add the removed servers back: assignment returns to the original set.
  servers.addAll(removedServers);
  assignmentMap=balancer.roundRobinAssignment(regions,servers);
  Set serverWithPrimary=assignmentMap.keySet();
  assertTrue(serverBefore.containsAll(serverWithPrimary));
  // Remove all favored nodes: the balancer must generate three new ones.
  removeMatchingServers(favoredNodesAfter,servers);
  assignmentMap=balancer.roundRobinAssignment(regions,servers);
  List favoredNodesNow=((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesNow.size() == 3);
  assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY))
      && !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY))
      && !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}

InternalCallVerifier BooleanVerifier 
/**
 * Random assignment must keep three favored nodes for a region: initial
 * placement on the primary, fallback to secondary/tertiary once the primary
 * is removed from the candidates, and a completely fresh favored set after
 * every favored node has been removed.
 */
@Test
public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException {
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());

  // Candidate servers: every region server in the mini cluster.
  List candidateServers = new ArrayList();
  for (int i = 0; i < SLAVES; i++) {
    candidateServers.add(
        TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName());
  }
  List regionList = new ArrayList(1);
  HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"));
  regionList.add(region);

  // Initial random assignment lands on the primary favored node.
  ServerName firstChoice = balancer.randomAssignment(region, candidateServers);
  List favoredBefore = ((FavoredNodeLoadBalancer) balancer).getFavoredNodes(region);
  assertTrue(favoredBefore.size() == 3);
  assertTrue(ServerName.isSameHostnameAndPort(firstChoice, favoredBefore.get(PRIMARY)));

  // Drop the chosen server: the next choice must fall to secondary/tertiary
  // while the favored-node list stays the same.
  removeMatchingServers(firstChoice, candidateServers);
  ServerName secondChoice = balancer.randomAssignment(region, candidateServers);
  List favoredAfter = ((FavoredNodeLoadBalancer) balancer).getFavoredNodes(region);
  assertTrue(favoredAfter.size() == 3);
  assertTrue(favoredAfter.containsAll(favoredBefore));
  assertTrue(ServerName.isSameHostnameAndPort(secondChoice, favoredBefore.get(SECONDARY))
      || ServerName.isSameHostnameAndPort(secondChoice, favoredBefore.get(TERTIARY)));

  // Drop every favored node: the balancer must generate three brand-new ones.
  removeMatchingServers(favoredAfter, candidateServers);
  balancer.randomAssignment(region, candidateServers);
  List favoredNow = ((FavoredNodeLoadBalancer) balancer).getFavoredNodes(region);
  assertTrue(favoredNow.size() == 3);
  assertTrue(!favoredNow.contains(favoredAfter.get(PRIMARY))
      && !favoredNow.contains(favoredAfter.get(SECONDARY))
      && !favoredNow.contains(favoredAfter.get(TERTIARY)));
}

Class: org.apache.hadoop.hbase.master.TestRegionPlan

InternalCallVerifier EqualityVerifier 
/**
 * Asserts RegionPlan equality/hashCode behavior as exercised here: two plans
 * for the same region are equal (and hash equal) even with source and
 * destination swapped, while a plan for a different region is not equal.
 */
@Test
public void test() {
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("table"));
  ServerName source = ServerName.valueOf("source", 1234, 2345);
  ServerName dest = ServerName.valueOf("dest", 1234, 2345);
  RegionPlan plan = new RegionPlan(hri, source, dest);

  // Same region, same endpoints: equal and hash-equal.
  assertEquals(plan.hashCode(), new RegionPlan(hri, source, dest).hashCode());
  assertEquals(plan, new RegionPlan(hri, source, dest));

  // Same region, endpoints swapped: still equal and hash-equal.
  assertEquals(plan.hashCode(), new RegionPlan(hri, dest, source).hashCode());
  assertEquals(plan, new RegionPlan(hri, dest, source));

  // Different region: not equal, different hash.
  HRegionInfo other = new HRegionInfo(TableName.valueOf("other"));
  assertNotEquals(plan.hashCode(), new RegionPlan(other, source, dest).hashCode());
  assertNotEquals(plan, new RegionPlan(other, source, dest));
}

Class: org.apache.hadoop.hbase.master.TestRegionState

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a RegionState through its protobuf form and back, verifying
 * that both the RegionState and the protobuf representations survive the
 * conversion unchanged.
 * <p>
 * Fix: the second protobuf was previously produced from {@code state1} again
 * ({@code protobuf2 = state1.convert()}), so the final assertion compared
 * state1's conversion with itself and never exercised the round trip; it now
 * converts the round-tripped {@code state2}.
 */
@Test
public void test() {
  RegionState state1 =
      new RegionState(new HRegionInfo(TableName.valueOf("table")), RegionState.State.OPENING);
  // RegionState -> protobuf -> RegionState -> protobuf.
  ClusterStatusProtos.RegionState protobuf1 = state1.convert();
  RegionState state2 = RegionState.convert(protobuf1);
  ClusterStatusProtos.RegionState protobuf2 = state2.convert();
  // Both representations must survive the round trip.
  assertEquals(state1, state2);
  assertEquals(protobuf1, protobuf2);
}

Class: org.apache.hadoop.hbase.master.TestRestartCluster

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This tests retaining assignments on a cluster restart
 */
@Test(timeout=300000) public void testRetainAssignmentOnRestart() throws Exception {
  UTIL.startMiniCluster(2);
  while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
    Threads.sleep(1);
  }
  // Turn the balancer off so assignments stay where retention puts them.
  UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false);
  LOG.info("\n\nCreating tables");
  for ( TableName TABLE : TABLES) {
    UTIL.createTable(TABLE,FAMILY);
  }
  for ( TableName TABLE : TABLES) {
    UTIL.waitTableEnabled(TABLE);
  }
  HMaster master=UTIL.getMiniHBaseCluster().getMaster();
  UTIL.waitUntilNoRegionsInTransition(120000);
  // Snapshot the region -> server mapping before the restart.
  SnapshotOfRegionAssignmentFromMeta snapshot=
      new SnapshotOfRegionAssignmentFromMeta(master.getConnection());
  snapshot.initialize();
  Map regionToRegionServerMap=snapshot.getRegionToRegionServerMap();
  MiniHBaseCluster cluster=UTIL.getHBaseCluster();
  List threads=cluster.getLiveRegionServerThreads();
  assertEquals(2,threads.size());
  // Record the ports of both region servers plus the master (which can also
  // carry regions); every assignment must be on one of these three ports.
  int[] rsPorts=new int[3];
  for (int i=0; i < 2; i++) {
    rsPorts[i]=threads.get(i).getRegionServer().getServerName().getPort();
  }
  rsPorts[2]=cluster.getMaster().getServerName().getPort();
  for ( ServerName serverName : regionToRegionServerMap.values()) {
    boolean found=false;
    for (int k=0; k < 3 && !found; k++) {
      found=serverName.getPort() == rsPorts[k];
    }
    assertTrue(found);
  }
  LOG.info("\n\nShutting down HBase cluster");
  cluster.shutdown();
  cluster.waitUntilShutDown();
  LOG.info("\n\nSleeping a bit");
  Thread.sleep(2000);
  LOG.info("\n\nStarting cluster the second time with the same ports");
  try {
    // Require all four servers before assignment starts, then restart the
    // region servers on their previous ports so retention can kick in.
    cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,4);
    master=cluster.startMaster().getMaster();
    for (int i=0; i < 3; i++) {
      cluster.getConf().setInt(HConstants.REGIONSERVER_PORT,rsPorts[i]);
      cluster.startRegionServer();
    }
  } finally {
    // Restore random ports and the default min-to-start.
    cluster.getConf().setInt(HConstants.REGIONSERVER_PORT,0);
    cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,2);
  }
  // All three restarted servers (plus the master) must come back online on
  // the recorded ports.
  List localServers=master.getServerManager().getOnlineServersList();
  assertEquals(4,localServers.size());
  for (int i=0; i < 3; i++) {
    boolean found=false;
    for ( ServerName serverName : localServers) {
      if (serverName.getPort() == rsPorts[i]) {
        found=true;
        break;
      }
    }
    assertTrue(found);
  }
  // Wait until the master is initialized and all regions (+1) are assigned.
  RegionStates regionStates=master.getAssignmentManager().getRegionStates();
  int expectedRegions=regionToRegionServerMap.size() + 1;
  while (!master.isInitialized()
      || regionStates.getRegionAssignments().size() != expectedRegions) {
    Threads.sleep(100);
  }
  // Every region (namespace excluded) must be back on the same host:port,
  // though served by a process with a new start code.
  snapshot=new SnapshotOfRegionAssignmentFromMeta(master.getConnection());
  snapshot.initialize();
  Map newRegionToRegionServerMap=snapshot.getRegionToRegionServerMap();
  assertEquals(regionToRegionServerMap.size(),newRegionToRegionServerMap.size());
  for ( Map.Entry entry : newRegionToRegionServerMap.entrySet()) {
    if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable())) continue;
    ServerName oldServer=regionToRegionServerMap.get(entry.getKey());
    ServerName currentServer=entry.getValue();
    assertEquals(oldServer.getHostAndPort(),currentServer.getHostAndPort());
    assertNotEquals(oldServer.getStartcode(),currentServer.getStartcode());
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Brings a three-node cluster up, creates the test tables, restarts the
 * entire cluster, and verifies the same region count comes back and every
 * table already exists and becomes available again.
 */
@Test(timeout = 300000)
public void testClusterRestart() throws Exception {
  UTIL.startMiniCluster(3);
  while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
    Threads.sleep(1);
  }

  // Create the tables and wait for them to be enabled.
  LOG.info("\n\nCreating tables");
  for (TableName tbl : TABLES) {
    UTIL.createTable(tbl, FAMILY);
  }
  for (TableName tbl : TABLES) {
    UTIL.waitTableEnabled(tbl);
  }
  List allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false);
  assertEquals(4, allRegions.size());

  // Bounce the whole HBase cluster.
  LOG.info("\n\nShutting down cluster");
  UTIL.shutdownMiniHBaseCluster();
  LOG.info("\n\nSleeping a bit");
  Thread.sleep(2000);
  LOG.info("\n\nStarting cluster the second time");
  UTIL.restartHBaseCluster(3);

  // The region count must be unchanged after the restart.
  allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false);
  assertEquals(4, allRegions.size());

  // Each table must already exist (create fails) and become available again.
  LOG.info("\n\nWaiting for tables to be available");
  for (TableName tbl : TABLES) {
    try {
      UTIL.createTable(tbl, FAMILY);
      assertTrue("Able to create table that should already exist", false);
    } catch (TableExistsException tee) {
      LOG.info("Table already exists as expected");
    }
    UTIL.waitTableAvailable(tbl);
  }
}

Class: org.apache.hadoop.hbase.master.TestRollingRestart

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises a full rolling restart: add a fourth region server, restart
 * both masters (backup first, then active), then stop and restart each
 * region server one at a time, checking that all regions stay assigned
 * and no regions remain in transition after every step.
 */
@Test(timeout=500000) public void testBasicRollingRestart() throws Exception {
  final int NUM_MASTERS=2;
  final int NUM_RS=3;
  final int NUM_REGIONS_TO_CREATE=20;
  int expectedNumRS=3;
  log("Starting cluster");
  Configuration conf=HBaseConfiguration.create();
  HBaseTestingUtility TEST_UTIL=new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniCluster(NUM_MASTERS,NUM_RS);
  MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster();
  log("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  TableName table=TableName.valueOf("tableRestart");
  byte[] family=Bytes.toBytes("family");
  log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
  Table ht=TEST_UTIL.createMultiRegionTable(table,family,NUM_REGIONS_TO_CREATE);
  int numRegions=-1;
  try (RegionLocator r=TEST_UTIL.getConnection().getRegionLocator(table)){
    numRegions=r.getStartKeys().length;
  }
  // One extra region beyond the user table — presumably the catalog
  // region (the "catalog and namespace" check below counts 2). TODO confirm.
  numRegions+=1;
  log("Waiting for no more RIT\n");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  log("Disabling table\n");
  TEST_UTIL.getHBaseAdmin().disableTable(table);
  log("Waiting for no more RIT\n");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  NavigableSet regions=HBaseTestingUtility.getAllOnlineRegions(cluster);
  log("Verifying only catalog and namespace regions are assigned\n");
  if (regions.size() != 2) {
    for ( String oregion : regions)
      log("Region still online: " + oregion);
  }
  assertEquals(2,regions.size());
  log("Enabling table\n");
  TEST_UTIL.getHBaseAdmin().enableTable(table);
  log("Waiting for no more RIT\n");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  log("Verifying there are " + numRegions + " assigned on cluster\n");
  regions=HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertRegionsAssigned(cluster,regions);
  assertEquals(expectedNumRS,cluster.getRegionServerThreads().size());
  log("Adding a fourth RS");
  RegionServerThread restarted=cluster.startRegionServer();
  expectedNumRS++;
  restarted.waitForServerOnline();
  log("Additional RS is online");
  log("Waiting for no more RIT");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  log("Verifying there are " + numRegions + " assigned on cluster");
  assertRegionsAssigned(cluster,regions);
  assertEquals(expectedNumRS,cluster.getRegionServerThreads().size());
  // Identify active vs backup master so they can be bounced in order.
  List masterThreads=cluster.getMasterThreads();
  MasterThread activeMaster=null;
  MasterThread backupMaster=null;
  assertEquals(2,masterThreads.size());
  if (masterThreads.get(0).getMaster().isActiveMaster()) {
    activeMaster=masterThreads.get(0);
    backupMaster=masterThreads.get(1);
  } else {
    activeMaster=masterThreads.get(1);
    backupMaster=masterThreads.get(0);
  }
  log("Stopping backup master\n\n");
  backupMaster.getMaster().stop("Stop of backup during rolling restart");
  cluster.hbaseCluster.waitOnMaster(backupMaster);
  log("Stopping primary master\n\n");
  activeMaster.getMaster().stop("Stop of active during rolling restart");
  cluster.hbaseCluster.waitOnMaster(activeMaster);
  log("Restarting primary master\n\n");
  activeMaster=cluster.startMaster();
  cluster.waitForActiveAndReadyMaster();
  log("Restarting backup master\n\n");
  backupMaster=cluster.startMaster();
  assertEquals(expectedNumRS,cluster.getRegionServerThreads().size());
  // Roll each region server: stop, wait for the master to handle the
  // shutdown, verify assignments, then start a replacement.
  List regionServers=cluster.getLiveRegionServerThreads();
  int num=1;
  int total=regionServers.size();
  for ( RegionServerThread rst : regionServers) {
    ServerName serverName=rst.getRegionServer().getServerName();
    log("Stopping region server " + num + " of "+ total+ " [ "+ serverName+ "]");
    rst.getRegionServer().stop("Stopping RS during rolling restart");
    cluster.hbaseCluster.waitOnRegionServer(rst);
    log("Waiting for RS shutdown to be handled by master");
    waitForRSShutdownToStartAndFinish(activeMaster,serverName);
    log("RS shutdown done, waiting for no more RIT");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Verifying there are " + numRegions + " assigned on cluster");
    assertRegionsAssigned(cluster,regions);
    expectedNumRS--;
    assertEquals(expectedNumRS,cluster.getRegionServerThreads().size());
    log("Restarting region server " + num + " of "+ total);
    restarted=cluster.startRegionServer();
    restarted.waitForServerOnline();
    expectedNumRS++;
    log("Region server " + num + " is back online");
    log("Waiting for no more RIT");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    log("Verifying there are " + numRegions + " assigned on cluster");
    assertRegionsAssigned(cluster,regions);
    assertEquals(expectedNumRS,cluster.getRegionServerThreads().size());
    num++;
  }
  Thread.sleep(1000);
  assertRegionsAssigned(cluster,regions);
  ht.close();
  TEST_UTIL.shutdownMiniCluster();
}

Class: org.apache.hadoop.hbase.master.TestSplitLogManager

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Per-test setup: starts a mini ZK cluster, resets the base and splitlog
 * znodes, resets counters, stubs the server manager mocks, and derives the
 * effective timeout {@code to} used by the timing-sensitive tests.
 */
@Before public void setup() throws Exception {
  TEST_UTIL=new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  conf=TEST_UTIL.getConfiguration();
  // Random suffix keeps watcher identities unique across test runs.
  zkw=new ZooKeeperWatcher(conf,"split-log-manager-tests" + UUID.randomUUID().toString(),null);
  ds=new DummyServer(zkw,conf);
  // Start from a clean znode tree.
  ZKUtil.deleteChildrenRecursively(zkw,zkw.baseZNode);
  ZKUtil.createAndFailSilent(zkw,zkw.baseZNode);
  assertTrue(ZKUtil.checkExists(zkw,zkw.baseZNode) != -1);
  LOG.debug(zkw.baseZNode + " created");
  ZKUtil.createAndFailSilent(zkw,zkw.splitLogZNode);
  assertTrue(ZKUtil.checkExists(zkw,zkw.splitLogZNode) != -1);
  LOG.debug(zkw.splitLogZNode + " created");
  stopped=false;
  resetCounters();
  // Every server looks online to the manager under test.
  Mockito.when(sm.isServerOnline(Mockito.any(ServerName.class))).thenReturn(true);
  Mockito.when(master.getServerManager()).thenReturn(sm);
  to=12000;
  conf.setInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT,to);
  conf.setInt("hbase.splitlog.manager.unassigned.timeout",2 * to);
  conf.setInt("hbase.splitlog.manager.timeoutmonitor.period",100);
  // Pad the timeout with extra monitor periods for scheduling slack.
  to=to + 16 * 100;
  this.mode=(conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY,false) ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a task node put into RESIGNED state by a worker is
 * resubmitted exactly once by the manager and ends up unassigned again.
 *
 * Fix: JUnit's {@code assertEquals(expected, actual)} takes the expected
 * value FIRST; the original calls had the arguments reversed, which
 * produces misleading failure messages ("expected:<N> but was:<0>").
 */
@Test(timeout=180000) public void testTaskResigned() throws Exception {
  LOG.info("TestTaskResigned - resubmit task node once in RESIGNED state");
  assertEquals(0,tot_mgr_resubmit.get());
  slm=new SplitLogManager(ds,conf,stopper,master,DUMMY_MASTER);
  assertEquals(0,tot_mgr_resubmit.get());
  TaskBatch batch=new TaskBatch();
  String tasknode=submitTaskAndWait(batch,"foo/1");
  assertEquals(0,tot_mgr_resubmit.get());
  final ServerName worker1=ServerName.valueOf("worker1,1,1");
  assertEquals(0,tot_mgr_resubmit.get());
  // Mark the task as resigned by the worker.
  SplitLogTask slt=new SplitLogTask.Resigned(worker1,this.mode);
  assertEquals(0,tot_mgr_resubmit.get());
  ZKUtil.setData(zkw,tasknode,slt.toByteArray());
  ZKUtil.checkExists(zkw,tasknode);
  // Wait for the manager to notice and resubmit, if it has not already.
  if (tot_mgr_resubmit.get() == 0) {
    waitForCounter(tot_mgr_resubmit,0,1,to / 2);
  }
  assertEquals(1,tot_mgr_resubmit.get());
  // The resubmitted task must be back in the unassigned state.
  byte[] taskstate=ZKUtil.getData(zkw,tasknode);
  slt=SplitLogTask.parseFrom(taskstate);
  assertTrue(slt.isUnassigned(DUMMY_MASTER));
}

InternalCallVerifier BooleanVerifier PublicFieldVerifier 
/**
 * The following test case is aiming to test the situation when
 * distributedLogReplay is turned off and restart a cluster: there should
 * be no recovery regions left in ZK.
 * @throws Exception
 */
@Test(timeout=300000) public void testRecoveryRegionRemovedFromZK() throws Exception {
  LOG.info("testRecoveryRegionRemovedFromZK");
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY,false);
  // Plant a stale recovering-region znode as if left behind by a crash.
  String nodePath=ZKUtil.joinZNode(zkw.recoveringRegionsZNode,HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
  ZKUtil.createSetData(zkw,nodePath,ZKUtil.positionToByteArray(0L));
  slm=new SplitLogManager(ds,conf,stopper,master,DUMMY_MASTER);
  // With log replay disabled, this should wipe the stale entry.
  slm.removeStaleRecoveringRegions(null);
  List recoveringRegions=zkw.getRecoverableZooKeeper().getChildren(zkw.recoveringRegionsZNode,false);
  assertTrue("Recovery regions isn't cleaned",recoveringRegions.isEmpty());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that an unassigned orphan task already present in ZK at manager
 * startup is acquired and resubmitted exactly once, and that repeated
 * lookups return the same Task instance.
 */
@Test(timeout=180000) public void testUnassignedOrphan() throws Exception {
  LOG.info("TestUnassignedOrphan - an unassigned task is resubmitted at" + " startup");
  // Create the orphan task node before the manager exists.
  String tasknode=ZKSplitLog.getEncodedNodeName(zkw,"orphan/test/slash");
  SplitLogTask slt=new SplitLogTask.Unassigned(DUMMY_MASTER,this.mode);
  zkw.getRecoverableZooKeeper().create(tasknode,slt.toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT);
  int version=ZKUtil.checkExists(zkw,tasknode);
  slm=new SplitLogManager(ds,conf,stopper,master,DUMMY_MASTER);
  waitForCounter(tot_mgr_orphan_task_acquired,0,1,to / 2);
  Task task=slm.findOrCreateOrphanTask(tasknode);
  assertTrue(task.isOrphan());
  assertTrue(task.isUnassigned());
  waitForCounter(tot_mgr_rescan,0,1,to / 2);
  // Same node must map to the same in-memory task.
  Task task2=slm.findOrCreateOrphanTask(tasknode);
  assertTrue(task == task2);
  LOG.debug("task = " + task);
  assertEquals(1L,tot_mgr_resubmit.get());
  assertEquals(1,task.incarnation.get());
  assertEquals(0,task.unforcedResubmits.get());
  assertTrue(task.isOrphan());
  assertTrue(task.isUnassigned());
  // The resubmit must have bumped the znode version.
  assertTrue(ZKUtil.checkExists(zkw,tasknode) > version);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that RESCAN nodes created while resubmitting a timed-out task
 * are eventually deleted, and that the task returns to the unassigned
 * state with a bumped znode version.
 */
@Test(timeout=180000) public void testRescanCleanup() throws Exception {
  LOG.info("TestRescanCleanup - ensure RESCAN nodes are cleaned up");
  slm=new SplitLogManager(ds,conf,stopper,master,DUMMY_MASTER);
  TaskBatch batch=new TaskBatch();
  String tasknode=submitTaskAndWait(batch,"foo/1");
  int version=ZKUtil.checkExists(zkw,tasknode);
  // Simulate a worker owning the task, then let it time out.
  final ServerName worker1=ServerName.valueOf("worker1,1,1");
  SplitLogTask slt=new SplitLogTask.Owned(worker1,this.mode);
  ZKUtil.setData(zkw,tasknode,slt.toByteArray());
  waitForCounter(tot_mgr_heartbeat,0,1,to / 2);
  // Wait for either a resubmit or a resubmit failure; only success is ok.
  waitForCounter(new Expr(){
    @Override public long eval(){
      return (tot_mgr_resubmit.get() + tot_mgr_resubmit_failed.get());
    }
  } ,0,1,5 * 60000);
  Assert.assertEquals("Could not run test. Lost ZK connection?",0,tot_mgr_resubmit_failed.get());
  int version1=ZKUtil.checkExists(zkw,tasknode);
  assertTrue(version1 > version);
  byte[] taskstate=ZKUtil.getData(zkw,tasknode);
  slt=SplitLogTask.parseFrom(taskstate);
  assertTrue(slt.isUnassigned(DUMMY_MASTER));
  // The temporary RESCAN node must get cleaned up.
  waitForCounter(tot_mgr_rescan_deleted,0,1,to / 2);
}

InternalCallVerifier BooleanVerifier HybridVerifier IgnoredMethod 
/**
 * Checks that the manager picks up the previous recovery mode from an
 * existing LOG_SPLITTING task in ZK, then switches to LOG_REPLAY once the
 * old task is gone and the mode is reset.
 * Ignored: distributed log replay is broken by HBASE-12751.
 */
@Ignore("DLR is broken by HBASE-12751") @Test(timeout=60000) public void testGetPreviousRecoveryMode() throws Exception {
  LOG.info("testGetPreviousRecoveryMode");
  SplitLogCounters.resetCounters();
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY,true);
  // A pre-existing LOG_SPLITTING task forces the manager to stay in
  // splitting mode despite DLR being enabled in the configuration.
  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw,"testRecovery"),new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"),RecoveryMode.LOG_SPLITTING).toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT);
  slm=new SplitLogManager(ds,conf,stopper,master,DUMMY_MASTER);
  LOG.info("Mode1=" + slm.getRecoveryMode());
  assertTrue(slm.isLogSplitting());
  // Remove the old task; the mode can now be switched.
  zkw.getRecoverableZooKeeper().delete(ZKSplitLog.getEncodedNodeName(zkw,"testRecovery"),-1);
  LOG.info("Mode2=" + slm.getRecoveryMode());
  slm.setRecoveryMode(false);
  LOG.info("Mode3=" + slm.getRecoveryMode());
  assertTrue("Mode4=" + slm.getRecoveryMode(),slm.isLogReplaying());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Verifies that an owned orphan task found at startup is acquired, kept
 * alive by worker heartbeats, and finally resubmitted (back to unassigned)
 * once the heartbeats stop.
 */
@Test(timeout=180000) public void testOrphanTaskAcquisition() throws Exception {
  LOG.info("TestOrphanTaskAcquisition");
  // Create a task node owned by the (dummy) master before startup.
  String tasknode=ZKSplitLog.getEncodedNodeName(zkw,"orphan/test/slash");
  SplitLogTask slt=new SplitLogTask.Owned(DUMMY_MASTER,this.mode);
  zkw.getRecoverableZooKeeper().create(tasknode,slt.toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT);
  slm=new SplitLogManager(ds,conf,stopper,master,DUMMY_MASTER);
  waitForCounter(tot_mgr_orphan_task_acquired,0,1,to / 2);
  Task task=slm.findOrCreateOrphanTask(tasknode);
  assertTrue(task.isOrphan());
  waitForCounter(tot_mgr_heartbeat,0,1,to / 2);
  assertFalse(task.isUnassigned());
  // last_update must be recent; the 1000 ms window assumes wall-clock
  // time moves normally between the heartbeat and this check — a tight
  // tolerance that could flake on a heavily loaded host.
  long curt=System.currentTimeMillis();
  assertTrue((task.last_update <= curt) && (task.last_update > (curt - 1000)));
  LOG.info("waiting for manager to resubmit the orphan task");
  waitForCounter(tot_mgr_resubmit,0,1,to + to / 2);
  assertTrue(task.isUnassigned());
  waitForCounter(tot_mgr_rescan,0,1,to + to / 2);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Test whether the splitlog manager correctly creates a task node in
 * zookeeper: after submission the node must parse back as an unassigned
 * task owned by this master.
 * @throws Exception
 */
@Test(timeout=180000) public void testTaskCreation() throws Exception {
  LOG.info("TestTaskCreation - test the creation of a task in zk");
  slm=new SplitLogManager(ds,conf,stopper,master,DUMMY_MASTER);
  TaskBatch taskBatch=new TaskBatch();
  String taskNodePath=submitTaskAndWait(taskBatch,"foo/1");
  // Read the freshly created node back and decode its state.
  byte[] nodeData=ZKUtil.getData(zkw,taskNodePath);
  SplitLogTask parsedTask=SplitLogTask.parseFrom(nodeData);
  LOG.info("Task node created " + parsedTask.toString());
  assertTrue(parsedTask.isUnassigned(DUMMY_MASTER));
}

InternalCallVerifier BooleanVerifier 
/**
 * Verifies that after a distributed log split completes, the log directory
 * is removed. A helper thread marks every pending task DONE so that
 * splitLogDistributed() can finish.
 */
@Test(timeout=60000) public void testLogFilesAreArchived() throws Exception {
  LOG.info("testLogFilesAreArchived");
  final SplitLogManager slm=new SplitLogManager(ds,conf,stopper,master,DUMMY_MASTER);
  FileSystem fs=TEST_UTIL.getTestFileSystem();
  Path dir=TEST_UTIL.getDataTestDirOnTestFS("testLogFilesAreArchived");
  conf.set(HConstants.HBASE_DIR,dir.toString());
  // Create a log directory with a single (empty) log file to split.
  Path logDirPath=new Path(dir,UUID.randomUUID().toString());
  fs.mkdirs(logDirPath);
  String logFile=ServerName.valueOf("foo",1,1).toString();
  fs.create(new Path(logDirPath,logFile)).close();
  // Background thread: poll the manager's task map and mark each task
  // DONE in ZK, retrying on ZK errors, so the blocking split below ends.
  new Thread(){
    @Override public void run(){
      boolean done=false;
      while (!done) {
        for ( Map.Entry entry : slm.getTasks().entrySet()) {
          final ServerName worker1=ServerName.valueOf("worker1,1,1");
          SplitLogTask slt=new SplitLogTask.Done(worker1,RecoveryMode.LOG_SPLITTING);
          boolean encounteredZKException=false;
          try {
            ZKUtil.setData(zkw,entry.getKey(),slt.toByteArray());
          } catch ( KeeperException e) {
            LOG.warn(e);
            encounteredZKException=true;
          }
          if (!encounteredZKException) {
            done=true;
          }
        }
      }
    }
  } .start();
  // Blocks until all tasks are done; the directory must then be gone.
  slm.splitLogDistributed(logDirPath);
  assertFalse(fs.exists(logDirPath));
}

InternalCallVerifier BooleanVerifier 
/**
 * Splitting an empty log directory must succeed trivially and remove the
 * directory itself.
 */
@Test(timeout=180000) public void testEmptyLogDir() throws Exception {
  LOG.info("testEmptyLogDir");
  slm=new SplitLogManager(ds,conf,stopper,master,DUMMY_MASTER);
  FileSystem fileSystem=TEST_UTIL.getTestFileSystem();
  // A freshly created, empty directory under the working dir.
  Path emptyDir=new Path(fileSystem.getWorkingDirectory(),UUID.randomUUID().toString());
  fileSystem.mkdirs(emptyDir);
  slm.splitLogDistributed(emptyDir);
  // Nothing to split, so the directory is simply cleaned up.
  assertFalse(fileSystem.exists(emptyDir));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Verifies that when a worker owning a task is declared dead, the task is
 * resubmitted (despite max.resubmit being 0, dead-server resubmits are
 * forced) and returns to the unassigned state with a bumped znode version.
 *
 * Fix: removed the redundant trailing {@code return;} at the end of this
 * void method.
 */
@Test(timeout=180000) public void testDeadWorker() throws Exception {
  LOG.info("testDeadWorker");
  // Disable ordinary resubmits so only the dead-worker path can fire.
  conf.setLong("hbase.splitlog.max.resubmit",0);
  slm=new SplitLogManager(ds,conf,stopper,master,DUMMY_MASTER);
  TaskBatch batch=new TaskBatch();
  String tasknode=submitTaskAndWait(batch,"foo/1");
  int version=ZKUtil.checkExists(zkw,tasknode);
  // Let worker1 claim the task, then declare it dead.
  final ServerName worker1=ServerName.valueOf("worker1,1,1");
  SplitLogTask slt=new SplitLogTask.Owned(worker1,this.mode);
  ZKUtil.setData(zkw,tasknode,slt.toByteArray());
  if (tot_mgr_heartbeat.get() == 0)
    waitForCounter(tot_mgr_heartbeat,0,1,to / 2);
  slm.handleDeadWorker(worker1);
  if (tot_mgr_resubmit.get() == 0)
    waitForCounter(tot_mgr_resubmit,0,1,to + to / 2);
  if (tot_mgr_resubmit_dead_server_task.get() == 0) {
    waitForCounter(tot_mgr_resubmit_dead_server_task,0,1,to + to / 2);
  }
  // The resubmit bumps the znode version and resets the state.
  int version1=ZKUtil.checkExists(zkw,tasknode);
  assertTrue(version1 > version);
  byte[] taskstate=ZKUtil.getData(zkw,tasknode);
  slt=SplitLogTask.parseFrom(taskstate);
  assertTrue(slt.isUnassigned(DUMMY_MASTER));
}

Class: org.apache.hadoop.hbase.master.TestTableLockManager

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Stress test for the table read lock: while a load-test writer runs,
 * background chores concurrently alter the table schema and flush/compact/
 * split regions. The test then verifies the table descriptor stays
 * consistent across master and region servers, that alters actually
 * happened, and that all written keys can still be read back.
 */
@Test(timeout=600000) public void testTableReadLock() throws Exception {
  prepareMiniCluster();
  LoadTestTool loadTool=new LoadTestTool();
  loadTool.setConf(TEST_UTIL.getConfiguration());
  int numKeys=10000;
  final TableName tableName=TableName.valueOf("testTableReadLock");
  final Admin admin=TEST_UTIL.getHBaseAdmin();
  final HTableDescriptor desc=new HTableDescriptor(tableName);
  final byte[] family=Bytes.toBytes("test_cf");
  desc.addFamily(new HColumnDescriptor(family));
  admin.createTable(desc);
  // Write phase: load numKeys rows via the load-test tool.
  int ret=loadTool.run(new String[]{"-tn",tableName.getNameAsString(),"-write",String.format("%d:%d:%d",1,10,10),"-num_keys",String.valueOf(numKeys),"-skip_init"});
  if (0 != ret) {
    String errorMsg="Load failed with error code " + ret;
    LOG.error(errorMsg);
    fail(errorMsg);
  }
  // Baseline count of family attribute values, to detect alters later.
  int familyValues=admin.getTableDescriptor(tableName).getFamily(family).getValues().size();
  StoppableImplementation stopper=new StoppableImplementation();
  final ChoreService choreService=new ChoreService("TEST_SERVER_NAME");
  // Chore 1: repeatedly modify the table schema with a random attribute,
  // mirroring the change into the local `desc` for later comparison.
  ScheduledChore alterThread=new ScheduledChore("Alter Chore",stopper,10000){
    @Override protected void chore(){
      Random random=new Random();
      try {
        HTableDescriptor htd=admin.getTableDescriptor(tableName);
        String val=String.valueOf(random.nextInt());
        htd.getFamily(family).setValue(val,val);
        desc.getFamily(family).setValue(val,val);
        admin.modifyTable(tableName,htd);
      } catch ( Exception ex) {
        LOG.warn("Caught exception",ex);
        fail(ex.getMessage());
      }
    }
  } ;
  // Chore 2: repeatedly flush/compact/split a splittable region.
  ScheduledChore splitThread=new ScheduledChore("Split thread",stopper,5000){
    @Override public void chore(){
      try {
        HRegion region=TEST_UTIL.getSplittableRegion(tableName,-1);
        if (region != null) {
          byte[] regionName=region.getRegionInfo().getRegionName();
          admin.flushRegion(regionName);
          admin.compactRegion(regionName);
          admin.splitRegion(regionName);
        } else {
          LOG.warn("Could not find suitable region for the table. Possibly the " + "region got closed and the attempts got over before " + "the region could have got reassigned.");
        }
      } catch ( NotServingRegionException nsre) {
        // Expected race: the region moved while we were operating on it.
        LOG.warn("Caught exception",nsre);
      } catch ( Exception ex) {
        LOG.warn("Caught exception",ex);
        fail(ex.getMessage());
      }
    }
  } ;
  choreService.scheduleChore(alterThread);
  choreService.scheduleChore(splitThread);
  TEST_UTIL.waitTableEnabled(tableName);
  // Keep checking descriptor consistency until splits produced >= 5 regions.
  while (true) {
    List regions=admin.getTableRegions(tableName);
    LOG.info(String.format("Table #regions: %d regions: %s:",regions.size(),regions));
    assertEquals(admin.getTableDescriptor(tableName),desc);
    for ( HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) {
      HTableDescriptor regionTableDesc=region.getTableDesc();
      assertEquals(desc,regionTableDesc);
    }
    if (regions.size() >= 5) {
      break;
    }
    Threads.sleep(1000);
  }
  stopper.stop("test finished");
  // At least one alter must have landed while the load was running.
  int newFamilyValues=admin.getTableDescriptor(tableName).getFamily(family).getValues().size();
  LOG.info(String.format("Altered the table %d times",newFamilyValues - familyValues));
  assertTrue(newFamilyValues > familyValues);
  // Read phase: verify all keys survive the concurrent schema changes.
  ret=loadTool.run(new String[]{"-tn",tableName.getNameAsString(),"-read","100:10","-num_keys",String.valueOf(numKeys),"-skip_init"});
  if (0 != ret) {
    String errorMsg="Verify failed with error code " + ret;
    LOG.error(errorMsg);
    fail(errorMsg);
  }
  admin.close();
  choreService.shutdown();
}

InternalCallVerifier BooleanVerifier 
/**
 * Runs a table alter (add column family) and a disable+delete concurrently
 * on the same table, with a coprocessor observer loaded to coordinate the
 * interleaving, and asserts both operations see consistent state.
 */
@Test(timeout=600000) public void testAlterAndDisable() throws Exception {
  prepareMiniCluster();
  HMaster master=TEST_UTIL.getHBaseCluster().getMaster();
  master.getMasterCoprocessorHost().load(TestAlterAndDisableMasterObserver.class,0,TEST_UTIL.getConfiguration());
  ExecutorService executor=Executors.newFixedThreadPool(2);
  // Task 1: add a column family and verify it is visible.
  Future alterTableFuture=executor.submit(new Callable(){
    @Override public Object call() throws Exception {
      Admin admin=TEST_UTIL.getHBaseAdmin();
      admin.addColumnFamily(TABLE_NAME,new HColumnDescriptor(NEW_FAMILY));
      LOG.info("Added new column family");
      HTableDescriptor tableDesc=admin.getTableDescriptor(TABLE_NAME);
      assertTrue(tableDesc.getFamiliesKeys().contains(NEW_FAMILY));
      return null;
    }
  } );
  // Task 2: disable and delete the same table concurrently.
  Future disableTableFuture=executor.submit(new Callable(){
    @Override public Object call() throws Exception {
      Admin admin=TEST_UTIL.getHBaseAdmin();
      admin.disableTable(TABLE_NAME);
      assertTrue(admin.isTableDisabled(TABLE_NAME));
      admin.deleteTable(TABLE_NAME);
      assertFalse(admin.tableExists(TABLE_NAME));
      return null;
    }
  } );
  try {
    disableTableFuture.get();
    alterTableFuture.get();
  } catch ( ExecutionException e) {
    // Re-throw assertion failures from the worker threads so JUnit
    // reports them as test failures rather than wrapped errors.
    if (e.getCause() instanceof AssertionError) {
      throw (AssertionError)e.getCause();
    }
    throw e;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier 
/**
 * Verifies that deleting a table also removes its table-lock znode: after
 * disable+delete, the lock node must no longer exist in ZooKeeper.
 */
@Test(timeout=600000) public void testDelete() throws Exception {
  prepareMiniCluster();
  Admin admin=TEST_UTIL.getHBaseAdmin();
  admin.disableTable(TABLE_NAME);
  admin.deleteTable(TABLE_NAME);
  final ZooKeeperWatcher zkWatcher=TEST_UTIL.getZooKeeperWatcher();
  // Path of the table's lock znode.
  final String znode=ZKUtil.joinZNode(zkWatcher.tableLockZNode,TABLE_NAME.getNameAsString());
  // Wait until the lock node disappears (checkExists < 0 means absent).
  TEST_UTIL.waitFor(5000,new Waiter.Predicate(){
    @Override public boolean evaluate() throws Exception {
      return ZKUtil.checkExists(zkWatcher,znode) < 0;
    }
  } );
  // Final explicit check with a descriptive failure message.
  int ver=ZKUtil.checkExists(zkWatcher,znode);
  assertTrue("Unexpected znode version " + ver,ver < 0);
}

Class: org.apache.hadoop.hbase.master.TestTableStateManager

InternalCallVerifier EqualityVerifier 
/**
 * Verifies that a table state written in the legacy ZooKeeper format is
 * migrated correctly: after a restart the master's table state manager
 * must report the table as DISABLED.
 *
 * Fix: JUnit's {@code Assert.assertEquals(expected, actual)} takes the
 * expected value FIRST; the original call had the arguments reversed,
 * which yields misleading failure messages.
 */
@Test(timeout=60000) public void testUpgradeFromZk() throws Exception {
  TableName tableName=TableName.valueOf("testUpgradeFromZk");
  TEST_UTIL.startMiniCluster(2,1);
  TEST_UTIL.shutdownMiniHBaseCluster();
  // Write the DISABLED state in the deprecated ZK format while down.
  ZooKeeperWatcher watcher=TEST_UTIL.getZooKeeperWatcher();
  setTableStateInZK(watcher,tableName,ZooKeeperProtos.DeprecatedTableState.State.DISABLED);
  TEST_UTIL.restartHBaseCluster(1);
  // The restarted master must have migrated and adopted the old state.
  HMaster master=TEST_UTIL.getHBaseCluster().getMaster();
  Assert.assertEquals(TableState.State.DISABLED,master.getTableStateManager().getTableState(tableName));
}

Class: org.apache.hadoop.hbase.master.balancer.TestBaseLoadBalancer

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests immediate assignment.
 * Invariant is that all regions have an assignment: meta goes to the
 * master, user regions never go to the master, and with no other servers
 * available a user region gets no assignment at all.
 * @throws Exception
 */
@Test(timeout=30000) public void testImmediateAssignment() throws Exception {
  List tmp=getListOfServerNames(randomServers(1,0));
  tmp.add(master);
  // Meta region must be assigned to the master.
  ServerName sn=loadBalancer.randomAssignment(HRegionInfo.FIRST_META_REGIONINFO,tmp);
  assertEquals(master,sn);
  // A user region must go to a non-master server.
  HRegionInfo hri=randomRegions(1,-1).get(0);
  sn=loadBalancer.randomAssignment(hri,tmp);
  assertNotEquals(master,sn);
  // With only the master available, a user region stays unassigned.
  tmp=new ArrayList();
  tmp.add(master);
  sn=loadBalancer.randomAssignment(hri,tmp);
  assertNull("Should not assign user regions on master",sn);
  // Bulk check over various region/server count combinations.
  for ( int[] mock : regionsAndServersMocks) {
    LOG.debug("testImmediateAssignment with " + mock[0] + " regions and "+ mock[1]+ " servers");
    List regions=randomRegions(mock[0]);
    List servers=randomServers(mock[1],0);
    List list=getListOfServerNames(servers);
    Map assignments=loadBalancer.immediateAssignment(regions,list);
    assertImmediateAssignment(regions,list,assignments);
    returnRegions(regions);
    returnServers(list);
  }
}

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the bulk assignment used during cluster startup.
 * Round-robin. Should yield a balanced cluster so same invariant as the
 * load balancer holds, all servers holding either floor(avg) or
 * ceiling(avg).
 * @throws Exception
 */
@Test(timeout=180000) public void testBulkAssignment() throws Exception {
  List tmp=getListOfServerNames(randomServers(5,0));
  List hris=randomRegions(20);
  hris.add(HRegionInfo.FIRST_META_REGIONINFO);
  tmp.add(master);
  Map> plans=loadBalancer.roundRobinAssignment(hris,tmp);
  // The master gets exactly the meta region and nothing else.
  assertTrue(plans.get(master).contains(HRegionInfo.FIRST_META_REGIONINFO));
  assertEquals(1,plans.get(master).size());
  // Every region must appear in exactly one plan.
  int totalRegion=0;
  for ( List regions : plans.values()) {
    totalRegion+=regions.size();
  }
  assertEquals(hris.size(),totalRegion);
  for ( int[] mock : regionsAndServersMocks) {
    LOG.debug("testBulkAssignment with " + mock[0] + " regions and "+ mock[1]+ " servers");
    List regions=randomRegions(mock[0]);
    List servers=randomServers(mock[1],0);
    List list=getListOfServerNames(servers);
    Map> assignments=loadBalancer.roundRobinAssignment(regions,list);
    // Balanced: each server holds floor(avg) or ceil(avg) regions.
    float average=(float)regions.size() / servers.size();
    int min=(int)Math.floor(average);
    int max=(int)Math.ceil(average);
    if (assignments != null && !assignments.isEmpty()) {
      for ( List regionList : assignments.values()) {
        assertTrue(regionList.size() == min || regionList.size() == max);
      }
    }
    returnRegions(regions);
    returnServers(list);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Tests Cluster.wouldLowerAvailability(): moving a region onto a server
 * that already hosts one of its replicas must be flagged as lowering
 * availability, while moves to replica-free servers must not. Also checks
 * the rack-aware variant (via rackManager) versus the rack-less one.
 */
@Test(timeout=180000) public void testRegionAvailability() throws Exception {
  List list0=new ArrayList();
  List list1=new ArrayList();
  List list2=new ArrayList();
  // hri1 and hri2 are replicas of the same region; hri3 is independent.
  HRegionInfo hri1=new HRegionInfo(TableName.valueOf("table"),"key1".getBytes(),"key2".getBytes(),false,100);
  HRegionInfo hri2=RegionReplicaUtil.getRegionInfoForReplica(hri1,1);
  HRegionInfo hri3=new HRegionInfo(TableName.valueOf("table"),"key2".getBytes(),"key3".getBytes(),false,101);
  list0.add(hri1);
  list1.add(hri2);
  list2.add(hri3);
  Map> clusterState=new LinkedHashMap>();
  clusterState.put(servers[0],list0);
  clusterState.put(servers[1],list1);
  clusterState.put(servers[2],list2);
  Cluster cluster=new Cluster(clusterState,null,null,rackManager);
  // servers[1] already hosts hri1's replica -> availability would drop.
  assertTrue(cluster.wouldLowerAvailability(hri1,servers[1]));
  assertTrue(!cluster.wouldLowerAvailability(hri1,servers[2]));
  assertTrue(!cluster.wouldLowerAvailability(hri2,servers[2]));
  assertTrue(!cluster.wouldLowerAvailability(hri3,servers[1]));
  // After adding hri3's replica to servers[1], moving hri3 there is bad.
  list1.add(RegionReplicaUtil.getRegionInfoForReplica(hri3,1));
  cluster=new Cluster(clusterState,null,null,rackManager);
  assertTrue(cluster.wouldLowerAvailability(hri3,servers[1]));
  // Spread hosts across servers/racks; placing hri1 where its own
  // primary lives still lowers availability with rack awareness on.
  clusterState.clear();
  clusterState.put(servers[0],list0);
  clusterState.put(servers[5],list1);
  clusterState.put(servers[6],list2);
  clusterState.put(servers[10],new ArrayList());
  cluster=new Cluster(clusterState,null,null,rackManager);
  assertTrue(cluster.wouldLowerAvailability(hri1,servers[0]));
  // Without a rack manager, a replica-free server is always fine.
  cluster=new Cluster(clusterState,null,null,null);
  assertTrue(!cluster.wouldLowerAvailability(hri1,servers[6]));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Verifies that Cluster correctly translates the RegionLocationFinder's
 * top-block-location server lists into per-region server-index arrays,
 * including: empty locations for unmocked regions, preserved ordering,
 * and -1 for a server that is not part of the cluster.
 */
@Test(timeout=180000) public void testClusterRegionLocations(){
  List servers=getListOfServerNames(randomServers(10,10));
  List regions=randomRegions(101);
  Map> clusterState=new HashMap>();
  assignRegions(regions,servers,clusterState);
  // Mock block locations for a handful of regions; region 43 points at
  // a server that is NOT in the cluster.
  RegionLocationFinder locationFinder=mock(RegionLocationFinder.class);
  when(locationFinder.getTopBlockLocations(regions.get(0))).thenReturn(Lists.newArrayList(servers.get(0)));
  when(locationFinder.getTopBlockLocations(regions.get(1))).thenReturn(Lists.newArrayList(servers.get(0),servers.get(1)));
  when(locationFinder.getTopBlockLocations(regions.get(42))).thenReturn(Lists.newArrayList(servers.get(4),servers.get(9),servers.get(5)));
  when(locationFinder.getTopBlockLocations(regions.get(43))).thenReturn(Lists.newArrayList(ServerName.valueOf("foo",0,0)));
  BaseLoadBalancer.Cluster cluster=new Cluster(clusterState,null,locationFinder,null);
  // Resolve region and server indexes inside the Cluster structures.
  int r0=ArrayUtils.indexOf(cluster.regions,regions.get(0));
  int r1=ArrayUtils.indexOf(cluster.regions,regions.get(1));
  int r10=ArrayUtils.indexOf(cluster.regions,regions.get(10));
  int r42=ArrayUtils.indexOf(cluster.regions,regions.get(42));
  int r43=ArrayUtils.indexOf(cluster.regions,regions.get(43));
  int s0=cluster.serversToIndex.get(servers.get(0).getHostAndPort());
  int s1=cluster.serversToIndex.get(servers.get(1).getHostAndPort());
  int s4=cluster.serversToIndex.get(servers.get(4).getHostAndPort());
  int s5=cluster.serversToIndex.get(servers.get(5).getHostAndPort());
  int s9=cluster.serversToIndex.get(servers.get(9).getHostAndPort());
  // Mocked locations map to the matching server indexes, in order.
  assertEquals(1,cluster.regionLocations[r0].length);
  assertEquals(s0,cluster.regionLocations[r0][0]);
  assertEquals(2,cluster.regionLocations[r1].length);
  assertEquals(s0,cluster.regionLocations[r1][0]);
  assertEquals(s1,cluster.regionLocations[r1][1]);
  // No mock for region 10 -> empty location array.
  assertEquals(0,cluster.regionLocations[r10].length);
  assertEquals(3,cluster.regionLocations[r42].length);
  assertEquals(s4,cluster.regionLocations[r42][0]);
  assertEquals(s9,cluster.regionLocations[r42][1]);
  assertEquals(s5,cluster.regionLocations[r42][2]);
  // Unknown server ("foo") resolves to index -1.
  assertEquals(1,cluster.regionLocations[r43].length);
  assertEquals(-1,cluster.regionLocations[r43][0]);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Same availability checks as testRegionAvailability, but after applying
 * MoveRegionAction to the Cluster: a move must update the internal
 * replica-colocation state so that subsequent wouldLowerAvailability()
 * answers reflect the new placement.
 */
@Test(timeout=180000) public void testRegionAvailabilityWithRegionMoves() throws Exception {
  List list0=new ArrayList();
  List list1=new ArrayList();
  List list2=new ArrayList();
  // hri1/hri2 are replicas of one region, hri3 is a separate region.
  HRegionInfo hri1=new HRegionInfo(TableName.valueOf("table"),"key1".getBytes(),"key2".getBytes(),false,100);
  HRegionInfo hri2=RegionReplicaUtil.getRegionInfoForReplica(hri1,1);
  HRegionInfo hri3=new HRegionInfo(TableName.valueOf("table"),"key2".getBytes(),"key3".getBytes(),false,101);
  list0.add(hri1);
  list1.add(hri2);
  list2.add(hri3);
  Map> clusterState=new LinkedHashMap>();
  clusterState.put(servers[0],list0);
  clusterState.put(servers[1],list1);
  clusterState.put(servers[2],list2);
  Cluster cluster=new Cluster(clusterState,null,null,rackManager);
  assertTrue(!cluster.wouldLowerAvailability(hri1,servers[2]));
  // Move region 0 from server 0 to server 2; placing hri1 on server 2
  // would now colocate it with itself.
  cluster.doAction(new MoveRegionAction(0,0,2));
  assertTrue(cluster.wouldLowerAvailability(hri1,servers[2]));
  // Second scenario: hri3's replica lives on servers[12].
  clusterState.clear();
  List list3=new ArrayList();
  HRegionInfo hri4=RegionReplicaUtil.getRegionInfoForReplica(hri3,1);
  list3.add(hri4);
  clusterState.put(servers[0],list0);
  clusterState.put(servers[5],list1);
  clusterState.put(servers[6],list2);
  clusterState.put(servers[12],list3);
  cluster=new Cluster(clusterState,null,null,rackManager);
  assertTrue(!cluster.wouldLowerAvailability(hri4,servers[0]));
  // Move hri3 onto server 0; hri3 and its replica would then collide there.
  cluster.doAction(new MoveRegionAction(2,2,0));
  assertTrue(cluster.wouldLowerAvailability(hri3,servers[0]));
}

Class: org.apache.hadoop.hbase.master.balancer.TestFavoredNodeAssignmentHelper

APIUtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier IgnoredMethod 
/**
 * With a single rack, secondary and tertiary favored nodes must both
 * differ from the primary and from each other.
 * Ignored until the FavoredNodes feature is finished.
 */
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testSecondaryAndTertiaryPlacementWithSingleRack(){
  Map rackToServerCount=new HashMap();
  rackToServerCount.put("rack1",10);
  Triple,FavoredNodeAssignmentHelper,List> primaryRSMapAndHelper=secondaryAndTertiaryRSPlacementHelper(60000,rackToServerCount);
  FavoredNodeAssignmentHelper helper=primaryRSMapAndHelper.getSecond();
  Map primaryRSMap=primaryRSMapAndHelper.getFirst();
  List regions=primaryRSMapAndHelper.getThird();
  Map secondaryAndTertiaryMap=helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  for ( HRegionInfo region : regions) {
    ServerName[] secondaryAndTertiaryServers=secondaryAndTertiaryMap.get(region);
    // All three favored nodes must be pairwise distinct.
    assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
    assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier IgnoredMethod 
/**
 * With two racks, the secondary and tertiary must be placed on a rack
 * different from the primary's, and on the same rack as each other.
 * Ignored until the FavoredNodes feature is finished.
 */
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testSecondaryAndTertiaryPlacementWithMultipleRacks(){
  Map rackToServerCount=new HashMap();
  rackToServerCount.put("rack1",10);
  rackToServerCount.put("rack2",10);
  Triple,FavoredNodeAssignmentHelper,List> primaryRSMapAndHelper=secondaryAndTertiaryRSPlacementHelper(60000,rackToServerCount);
  FavoredNodeAssignmentHelper helper=primaryRSMapAndHelper.getSecond();
  Map primaryRSMap=primaryRSMapAndHelper.getFirst();
  assertTrue(primaryRSMap.size() == 60000);
  Map secondaryAndTertiaryMap=helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  assertTrue(secondaryAndTertiaryMap.size() == 60000);
  for ( Map.Entry entry : secondaryAndTertiaryMap.entrySet()) {
    ServerName[] allServersForRegion=entry.getValue();
    String primaryRSRack=rackManager.getRack(primaryRSMap.get(entry.getKey()));
    String secondaryRSRack=rackManager.getRack(allServersForRegion[0]);
    String tertiaryRSRack=rackManager.getRack(allServersForRegion[1]);
    // Secondary+tertiary share a rack that differs from the primary's.
    assertTrue(!primaryRSRack.equals(secondaryRSRack));
    assertTrue(secondaryRSRack.equals(tertiaryRSRack));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier IgnoredMethod 
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks(){ Map rackToServerCount=new HashMap(); rackToServerCount.put("rack1",1); rackToServerCount.put("rack2",1); Triple,FavoredNodeAssignmentHelper,List> primaryRSMapAndHelper=secondaryAndTertiaryRSPlacementHelper(6,rackToServerCount); FavoredNodeAssignmentHelper helper=primaryRSMapAndHelper.getSecond(); Map primaryRSMap=primaryRSMapAndHelper.getFirst(); List regions=primaryRSMapAndHelper.getThird(); assertTrue(primaryRSMap.size() == 6); Map secondaryAndTertiaryMap=helper.placeSecondaryAndTertiaryRS(primaryRSMap); for ( HRegionInfo region : regions) { assertTrue(secondaryAndTertiaryMap.get(region) == null); } }

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier HybridVerifier IgnoredMethod 
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack(){ Map rackToServerCount=new HashMap(); rackToServerCount.put("rack1",2); rackToServerCount.put("rack2",1); Triple,FavoredNodeAssignmentHelper,List> primaryRSMapAndHelper=secondaryAndTertiaryRSPlacementHelper(6,rackToServerCount); FavoredNodeAssignmentHelper helper=primaryRSMapAndHelper.getSecond(); Map primaryRSMap=primaryRSMapAndHelper.getFirst(); List regions=primaryRSMapAndHelper.getThird(); assertTrue(primaryRSMap.size() == 6); Map secondaryAndTertiaryMap=helper.placeSecondaryAndTertiaryRS(primaryRSMap); for ( HRegionInfo region : regions) { ServerName s=primaryRSMap.get(region); ServerName secondaryRS=secondaryAndTertiaryMap.get(region)[0]; ServerName tertiaryRS=secondaryAndTertiaryMap.get(region)[1]; if (rackManager.getRack(s).equals("rack1")) { assertTrue(rackManager.getRack(secondaryRS).equals("rack2") && rackManager.getRack(tertiaryRS).equals("rack1")); } if (rackManager.getRack(s).equals("rack2")) { assertTrue(rackManager.getRack(secondaryRS).equals("rack1") && rackManager.getRack(tertiaryRS).equals("rack1")); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier IgnoredMethod 
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testSecondaryAndTertiaryPlacementWithSingleServer(){ Map rackToServerCount=new HashMap(); rackToServerCount.put("rack1",1); Triple,FavoredNodeAssignmentHelper,List> primaryRSMapAndHelper=secondaryAndTertiaryRSPlacementHelper(1,rackToServerCount); FavoredNodeAssignmentHelper helper=primaryRSMapAndHelper.getSecond(); Map primaryRSMap=primaryRSMapAndHelper.getFirst(); List regions=primaryRSMapAndHelper.getThird(); Map secondaryAndTertiaryMap=helper.placeSecondaryAndTertiaryRS(primaryRSMap); assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null); }

Class: org.apache.hadoop.hbase.master.balancer.TestRegionLocationFinder

IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Each online region's own HDFS block distribution must agree with what the
 * RegionLocationFinder reports for that region: same unique-block total weight,
 * and (when there is any data) the same top host.
 */
@Test public void testInternalGetTopBlockLocation() throws Exception {
  for (int serverIndex = 0; serverIndex < ServerNum; serverIndex++) {
    HRegionServer regionServer = cluster.getRegionServer(serverIndex);
    for (Region onlineRegion : regionServer.getOnlineRegions(tableName)) {
      HDFSBlocksDistribution fromRegion = onlineRegion.getHDFSBlocksDistribution();
      HDFSBlocksDistribution fromFinder =
          finder.getBlockDistribution(onlineRegion.getRegionInfo());
      assertEquals(fromRegion.getUniqueBlocksTotalWeight(),
          fromFinder.getUniqueBlocksTotalWeight());
      // A weight of zero means no blocks, hence no top host to compare.
      if (fromRegion.getUniqueBlocksTotalWeight() != 0) {
        assertEquals(fromRegion.getTopHosts().get(0), fromFinder.getTopHosts().get(0));
      }
    }
  }
}

IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * For every region whose data is local to its hosting server, the finder's top
 * block locations must include every region server in the mini cluster
 * (presumably all servers share one hostname in the mini cluster — see
 * testMapHostNameToServerName, which asserts a single distinct hostname).
 */
@Test public void testGetTopBlockLocations() throws Exception {
  for (int i = 0; i < ServerNum; i++) {
    HRegionServer server = cluster.getRegionServer(i);
    for (Region region : server.getOnlineRegions(tableName)) {
      // Restored stripped generic parameter: the finder returns server names.
      List<ServerName> servers = finder.getTopBlockLocations(region.getRegionInfo());
      // Regions with no data have no block locations to check.
      if (region.getHDFSBlocksDistribution().getUniqueBlocksTotalWeight() == 0) {
        continue;
      }
      List<String> topHosts = region.getHDFSBlocksDistribution().getTopHosts();
      // Only check regions whose hosting server is among the top hosts.
      if (!topHosts.contains(server.getServerName().getHostname())) {
        continue;
      }
      for (int j = 0; j < ServerNum; j++) {
        ServerName serverName = cluster.getRegionServer(j).getServerName();
        assertTrue(servers.contains(serverName));
      }
    }
  }
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Collects the distinct hostnames of all region servers (the mini cluster runs
 * them all on one host, so exactly one is expected) and checks that mapping the
 * hostnames back yields every server in the cluster.
 */
@Test public void testMapHostNameToServerName() throws Exception {
  // Restored stripped generics: hostnames in, server names out.
  List<String> topHosts = new ArrayList<String>();
  for (int i = 0; i < ServerNum; i++) {
    HRegionServer server = cluster.getRegionServer(i);
    String serverHost = server.getServerName().getHostname();
    if (!topHosts.contains(serverHost)) {
      topHosts.add(serverHost);
    }
  }
  List<ServerName> servers = finder.mapHostNameToServerName(topHosts);
  // All mini-cluster servers run on the same host, so one distinct hostname.
  assertEquals(1, topHosts.size());
  for (int i = 0; i < ServerNum; i++) {
    ServerName server = cluster.getRegionServer(i).getServerName();
    assertTrue(servers.contains(server));
  }
}

Class: org.apache.hadoop.hbase.master.balancer.TestServerAndLoad

InternalCallVerifier EqualityVerifier 
/**
 * ServerAndLoad equals/hashCode contract: equal for identical (server, load)
 * pairs, unequal when either the load or the server differs.
 */
@Test public void test(){
  ServerName server = ServerName.valueOf("host", 12345, 112244);
  int startcode = 12;
  ServerAndLoad sal = new ServerAndLoad(server, startcode);
  // Same server and load: equal objects with equal hashes.
  ServerAndLoad sameValues = new ServerAndLoad(server, startcode);
  assertEquals(sal.hashCode(), sameValues.hashCode());
  assertEquals(sal, sameValues);
  // A different load breaks equality (and, for this type, the hash too).
  ServerAndLoad heavierLoad = new ServerAndLoad(server, startcode + 1);
  assertNotEquals(sal.hashCode(), heavierLoad.hashCode());
  assertNotEquals(sal, heavierLoad);
  // A different server likewise.
  ServerName other = ServerName.valueOf("other", 12345, 112244);
  ServerAndLoad otherServer = new ServerAndLoad(other, startcode);
  assertNotEquals(sal.hashCode(), otherServer.hashCode());
  assertNotEquals(sal, otherServer);
}

Class: org.apache.hadoop.hbase.master.balancer.TestStochasticLoadBalancer

InternalCallVerifier BooleanVerifier 
/**
 * TableSkewCostFunction must produce a normalized cost in [0, 1] (1.01 allows
 * for floating-point slack) for every mocked cluster state.
 */
@Test public void testTableSkewCost(){
  Configuration configuration = HBaseConfiguration.create();
  StochasticLoadBalancer.CostFunction skewCost =
      new StochasticLoadBalancer.TableSkewCostFunction(configuration);
  for (int[] serverRegionCounts : clusterStateMocks) {
    BaseLoadBalancer.Cluster mockedCluster = mockCluster(serverRegionCounts);
    skewCost.init(mockedCluster);
    double computed = skewCost.cost();
    assertTrue(computed >= 0);
    assertTrue(computed <= 1.01);
  }
}

InternalCallVerifier BooleanVerifier 
/**
 * RegionReplicaHostCostFunction must produce a normalized cost in [0, 1]
 * (1.01 allows for floating-point slack) for every mocked cluster state.
 */
@Test public void testReplicaCost(){
  Configuration configuration = HBaseConfiguration.create();
  StochasticLoadBalancer.CostFunction replicaCost =
      new StochasticLoadBalancer.RegionReplicaHostCostFunction(configuration);
  for (int[] serverRegionCounts : clusterStateMocks) {
    BaseLoadBalancer.Cluster mockedCluster = mockCluster(serverRegionCounts);
    replicaCost.init(mockedCluster);
    double computed = replicaCost.cost();
    assertTrue(computed >= 0);
    assertTrue(computed <= 1.01);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testReplicaCostForReplicas(){ Configuration conf=HBaseConfiguration.create(); StochasticLoadBalancer.CostFunction costFunction=new StochasticLoadBalancer.RegionReplicaHostCostFunction(conf); int[] servers=new int[]{3,3,3,3,3}; TreeMap> clusterState=mockClusterServers(servers); BaseLoadBalancer.Cluster cluster; cluster=new BaseLoadBalancer.Cluster(clusterState,null,null,null); costFunction.init(cluster); double costWithoutReplicas=costFunction.cost(); assertEquals(0,costWithoutReplicas,0); HRegionInfo replica1=RegionReplicaUtil.getRegionInfoForReplica(clusterState.firstEntry().getValue().get(0),1); clusterState.lastEntry().getValue().add(replica1); cluster=new BaseLoadBalancer.Cluster(clusterState,null,null,null); costFunction.init(cluster); double costWith1ReplicaDifferentServer=costFunction.cost(); assertEquals(0,costWith1ReplicaDifferentServer,0); HRegionInfo replica2=RegionReplicaUtil.getRegionInfoForReplica(replica1,2); clusterState.lastEntry().getValue().add(replica2); cluster=new BaseLoadBalancer.Cluster(clusterState,null,null,null); costFunction.init(cluster); double costWith1ReplicaSameServer=costFunction.cost(); assertTrue(costWith1ReplicaDifferentServer < costWith1ReplicaSameServer); HRegionInfo replica3; Iterator>> it; Entry> entry; clusterState=mockClusterServers(servers); it=clusterState.entrySet().iterator(); entry=it.next(); HRegionInfo hri=entry.getValue().get(0); replica1=RegionReplicaUtil.getRegionInfoForReplica(hri,1); replica2=RegionReplicaUtil.getRegionInfoForReplica(hri,2); replica3=RegionReplicaUtil.getRegionInfoForReplica(hri,3); entry.getValue().add(replica1); entry.getValue().add(replica2); it.next().getValue().add(replica3); cluster=new BaseLoadBalancer.Cluster(clusterState,null,null,null); costFunction.init(cluster); double costWith3ReplicasSameServer=costFunction.cost(); clusterState=mockClusterServers(servers); hri=clusterState.firstEntry().getValue().get(0); replica1=RegionReplicaUtil.getRegionInfoForReplica(hri,1); 
replica2=RegionReplicaUtil.getRegionInfoForReplica(hri,2); replica3=RegionReplicaUtil.getRegionInfoForReplica(hri,3); clusterState.firstEntry().getValue().add(replica1); clusterState.lastEntry().getValue().add(replica2); clusterState.lastEntry().getValue().add(replica3); cluster=new BaseLoadBalancer.Cluster(clusterState,null,null,null); costFunction.init(cluster); double costWith2ReplicasOnTwoServers=costFunction.cost(); assertTrue(costWith2ReplicasOnTwoServers < costWith3ReplicasSameServer); }

InternalCallVerifier EqualityVerifier 
/**
 * MoveCostFunction: cost is 0 with no moves, scales linearly with the moved
 * fraction up to the cap, for both a small (200-region) and a large
 * (10000-region) cluster.
 */
@Test public void testMoveCost() throws Exception {
  Configuration configuration = HBaseConfiguration.create();
  StochasticLoadBalancer.CostFunction moveCost =
      new StochasticLoadBalancer.MoveCostFunction(configuration);
  for (int[] serverRegionCounts : clusterStateMocks) {
    BaseLoadBalancer.Cluster mockedCluster = mockCluster(serverRegionCounts);
    moveCost.init(mockedCluster);
    // No moves yet: zero cost.
    assertEquals(0.0f, moveCost.cost(), 0.001);
    // Small cluster: 200 regions, cost proportional to moves.
    mockedCluster.setNumRegions(200);
    mockedCluster.setNumMovedRegions(10);
    assertEquals(0.05f, moveCost.cost(), 0.001);
    mockedCluster.setNumMovedRegions(100);
    assertEquals(0.5f, moveCost.cost(), 0.001);
    mockedCluster.setNumMovedRegions(200);
    assertEquals(1.0f, moveCost.cost(), 0.001);
    // Large cluster: 10000 regions, cost normalized against the move cap.
    mockedCluster.setNumRegions(10000);
    mockedCluster.setNumMovedRegions(250);
    assertEquals(0.1f, moveCost.cost(), 0.001);
    mockedCluster.setNumMovedRegions(1250);
    assertEquals(0.5f, moveCost.cost(), 0.001);
    mockedCluster.setNumMovedRegions(2500);
    assertEquals(1.0f, moveCost.cost(), 0.01);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * costFromArray: 0 for a perfectly uniform load, 1 when all load sits on a
 * single entry, and 0.5 when exactly half the entries carry all the load.
 */
@Test public void testCostFromArray(){
  Configuration configuration = HBaseConfiguration.create();
  StochasticLoadBalancer.CostFromRegionLoadFunction costFunction =
      new StochasticLoadBalancer.MemstoreSizeCostFunction(configuration);
  costFunction.init(mockCluster(new int[]{0, 0, 0, 0, 1}));
  // Uniform load across 100 entries -> cost 0.
  double[] uniform = new double[100];
  for (int idx = 0; idx < 100; idx++) {
    uniform[idx] = 10;
  }
  assertEquals(0, costFunction.costFromArray(uniform), 0.01);
  // All load on the last of 101 entries -> cost 1.
  double[] singleSpike = new double[101];
  for (int idx = 0; idx < 100; idx++) {
    singleSpike[idx] = 0;
  }
  singleSpike[100] = 100;
  assertEquals(1, costFunction.costFromArray(singleSpike), 0.01);
  // First 100 entries empty, last 100 fully loaded -> cost 0.5.
  double[] halfLoaded = new double[200];
  for (int idx = 0; idx < 100; idx++) {
    halfLoaded[idx] = 0;
    halfLoaded[idx + 100] = 100;
  }
  assertEquals(0.5, costFunction.costFromArray(halfLoaded), 0.01);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * RegionCountSkewCostFunction: normalized into [0, 1] for arbitrary mocks, zero
 * for every balanced layout, and one for a maximally skewed layout.
 */
@Test public void testSkewCost(){
  Configuration configuration = HBaseConfiguration.create();
  StochasticLoadBalancer.CostFunction skewCost =
      new StochasticLoadBalancer.RegionCountSkewCostFunction(configuration);
  for (int[] serverRegionCounts : clusterStateMocks) {
    skewCost.init(mockCluster(serverRegionCounts));
    double computed = skewCost.cost();
    assertTrue(computed >= 0);
    assertTrue(computed <= 1.01);
  }
  // Layouts differing by at most one region per server are all zero-cost.
  int[][] balancedLayouts = {
      {0, 0, 0, 0, 1},
      {0, 0, 0, 1, 1},
      {0, 0, 1, 1, 1},
      {0, 1, 1, 1, 1},
      {1, 1, 1, 1, 1}
  };
  for (int[] layout : balancedLayouts) {
    skewCost.init(mockCluster(layout));
    assertEquals(0, skewCost.cost(), 0.01);
  }
  // Everything on one server is maximal skew.
  skewCost.init(mockCluster(new int[]{10000, 0, 0, 0, 0}));
  assertEquals(1, skewCost.cost(), 0.01);
}

Class: org.apache.hadoop.hbase.master.cleaner.TestCleanerChore

InternalCallVerifier BooleanVerifier 
/**
 * With a NeverDelete delegate configured, a chore pass must leave both the test
 * file and its (non-empty) parent directory untouched.
 */
@Test public void testSavesFilesOnRequest() throws Exception {
  Stoppable stoppable = new StoppableImplementation();
  Configuration configuration = UTIL.getConfiguration();
  Path dataDir = UTIL.getDataTestDir();
  FileSystem fileSystem = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  // Install a delegate that refuses every deletion.
  configuration.set(confKey, NeverDelete.class.getName());
  AllValidPaths chore =
      new AllValidPaths("test-file-cleaner", stoppable, configuration, fileSystem, dataDir, confKey);
  Path parentDir = new Path(dataDir, "parent");
  Path testFile = new Path(parentDir, "someFile");
  fileSystem.mkdirs(parentDir);
  fileSystem.create(testFile).close();
  assertTrue("Test file didn't get created.", fileSystem.exists(testFile));
  chore.chore();
  // NeverDelete wins: nothing may be removed.
  assertTrue("File didn't get deleted", fileSystem.exists(testFile));
  assertTrue("Empty directory didn't get deleted", fileSystem.exists(parentDir));
}

InternalCallVerifier BooleanVerifier 
/**
 * While cleaning a directory, all the files in the directory may be deleted, but there may be
 * another file added, in which case the directory shouldn't be deleted.
 * <p>
 * The spied isFileDeletable sneaks a new file into the parent directory while the chore is
 * iterating, so the chore must delete the original file yet keep the directory (and the
 * late-added file).
 * @throws IOException on failure
 */
@Test public void testCleanerDoesNotDeleteDirectoryWithLateAddedFiles() throws IOException {
  Stoppable stop=new StoppableImplementation();
  Configuration conf=UTIL.getConfiguration();
  final Path testDir=UTIL.getDataTestDir();
  final FileSystem fs=UTIL.getTestFileSystem();
  String confKey="hbase.test.cleaner.delegates";
  conf.set(confKey,AlwaysDelete.class.getName());
  AllValidPaths chore=new AllValidPaths("test-file-cleaner",stop,conf,fs,testDir,confKey);
  // Swap the configured delegate for a Mockito spy so isFileDeletable can be hooked.
  AlwaysDelete delegate=(AlwaysDelete)chore.cleanersChain.get(0);
  AlwaysDelete spy=Mockito.spy(delegate);
  chore.cleanersChain.set(0,spy);
  final Path parent=new Path(testDir,"parent");
  Path file=new Path(parent,"someFile");
  fs.mkdirs(parent);
  fs.create(file).close();
  assertTrue("Test file didn't get created.",fs.exists(file));
  final Path addedFile=new Path(parent,"addedFile");
  // While the chore checks "someFile", create "addedFile" in the same directory,
  // then fall through to the real (always-true) isFileDeletable.
  Mockito.doAnswer(new Answer(){
    @Override public Boolean answer( InvocationOnMock invocation) throws Throwable {
      fs.create(addedFile).close();
      FSUtils.logFileSystemState(fs,testDir,LOG);
      return (Boolean)invocation.callRealMethod();
    }
  } ).when(spy).isFileDeletable(Mockito.any(FileStatus.class));
  chore.chore();
  // The late-added file and its parent must survive; only the original file goes.
  assertTrue("Added file unexpectedly deleted",fs.exists(addedFile));
  assertTrue("Parent directory deleted unexpectedly",fs.exists(parent));
  assertFalse("Original file unexpectedly retained",fs.exists(file));
  // The hook must have fired exactly once (for the original file).
  Mockito.verify(spy,Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class));
  Mockito.reset(spy);
}

InternalCallVerifier BooleanVerifier 
/**
 * Test to make sure that we don't attempt to ask the delegate whether or not we should
 * preserve a directory — isFileDeletable must never be invoked with a directory's status.
 * @throws Exception on failure
 */
@Test public void testDoesNotCheckDirectories() throws Exception {
  Stoppable stoppable = new StoppableImplementation();
  Configuration configuration = UTIL.getConfiguration();
  Path dataDir = UTIL.getDataTestDir();
  FileSystem fileSystem = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  configuration.set(confKey, AlwaysDelete.class.getName());
  AllValidPaths chore =
      new AllValidPaths("test-file-cleaner", stoppable, configuration, fileSystem, dataDir, confKey);
  // Spy on the delegate so calls to isFileDeletable can be verified.
  AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
  AlwaysDelete spy = Mockito.spy(delegate);
  chore.cleanersChain.set(0, spy);
  Path parentDir = new Path(dataDir, "parent");
  Path childFile = new Path(parentDir, "someFile");
  fileSystem.mkdirs(parentDir);
  assertTrue("Test parent didn't get created.", fileSystem.exists(parentDir));
  fileSystem.create(childFile).close();
  assertTrue("Test file didn't get created.", fileSystem.exists(childFile));
  FileStatus parentStatus = fileSystem.getFileStatus(parentDir);
  chore.chore();
  // The delegate must never be consulted about the directory itself.
  Mockito.verify(spy, Mockito.never()).isFileDeletable(parentStatus);
  Mockito.reset(spy);
}

InternalCallVerifier BooleanVerifier 
/**
 * With an AlwaysDelete delegate, a chore pass must remove every file and then
 * collapse the now-empty directory tree, including a sibling directory that was
 * empty from the start.
 */
@Test public void testDeletesEmptyDirectories() throws Exception {
  Stoppable stoppable = new StoppableImplementation();
  Configuration configuration = UTIL.getConfiguration();
  Path dataDir = UTIL.getDataTestDir();
  FileSystem fileSystem = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  configuration.set(confKey, AlwaysDelete.class.getName());
  AllValidPaths chore =
      new AllValidPaths("test-file-cleaner", stoppable, configuration, fileSystem, dataDir, confKey);
  // Layout: parent/{child/someFile, emptyChild} plus a top-level file.
  Path parentDir = new Path(dataDir, "parent");
  Path childDir = new Path(parentDir, "child");
  Path emptyChildDir = new Path(parentDir, "emptyChild");
  Path nestedFile = new Path(childDir, "someFile");
  fileSystem.mkdirs(childDir);
  fileSystem.mkdirs(emptyChildDir);
  fileSystem.create(nestedFile).close();
  Path topFile = new Path(dataDir, "topFile");
  fileSystem.create(topFile).close();
  assertTrue("Test file didn't get created.", fileSystem.exists(nestedFile));
  assertTrue("Test file didn't get created.", fileSystem.exists(topFile));
  chore.chore();
  // Every file and every (now-)empty directory must be gone.
  assertFalse("File didn't get deleted", fileSystem.exists(topFile));
  assertFalse("File didn't get deleted", fileSystem.exists(nestedFile));
  assertFalse("Empty directory didn't get deleted", fileSystem.exists(childDir));
  assertFalse("Empty directory didn't get deleted", fileSystem.exists(parentDir));
}

InternalCallVerifier BooleanVerifier 
/**
 * Once the cleaner has been stopped, a chore pass must not delete anything, even
 * with an AlwaysDelete delegate configured.
 */
@Test public void testStoppedCleanerDoesNotDeleteFiles() throws Exception {
  Stoppable stoppable = new StoppableImplementation();
  Configuration configuration = UTIL.getConfiguration();
  Path dataDir = UTIL.getDataTestDir();
  FileSystem fileSystem = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  configuration.set(confKey, AlwaysDelete.class.getName());
  AllValidPaths chore =
      new AllValidPaths("test-file-cleaner", stoppable, configuration, fileSystem, dataDir, confKey);
  Path topFile = new Path(dataDir, "topFile");
  fileSystem.create(topFile).close();
  assertTrue("Test file didn't get created.", fileSystem.exists(topFile));
  // Stop before running: the chore must become a no-op.
  stoppable.stop("testing stop");
  chore.chore();
  assertTrue("File got deleted while chore was stopped", fileSystem.exists(topFile));
}

InternalCallVerifier BooleanVerifier 
/**
 * The cleaner runs in a loop, where it first checks to see all the files under a directory can
 * be deleted. If they all can, then we try to delete the directory. However, a file may be
 * added to that directory after the original check. This ensures that we don't accidentally
 * delete that directory and don't get spurious IOExceptions.
 * <p>
 * This was from HBASE-7465.
 * @throws Exception on failure
 */
@Test public void testNoExceptionFromDirectoryWithRacyChildren() throws Exception {
  Stoppable stop=new StoppableImplementation();
  // NOTE(review): conf comes from a fresh local utility while fs/testDir come from
  // the shared UTIL — presumably intentional isolation of config changes; confirm.
  HBaseTestingUtility localUtil=new HBaseTestingUtility();
  Configuration conf=localUtil.getConfiguration();
  final Path testDir=UTIL.getDataTestDir();
  final FileSystem fs=UTIL.getTestFileSystem();
  LOG.debug("Writing test data to: " + testDir);
  String confKey="hbase.test.cleaner.delegates";
  conf.set(confKey,AlwaysDelete.class.getName());
  AllValidPaths chore=new AllValidPaths("test-file-cleaner",stop,conf,fs,testDir,confKey);
  // Spy the delegate so a file can be injected mid-check.
  AlwaysDelete delegate=(AlwaysDelete)chore.cleanersChain.get(0);
  AlwaysDelete spy=Mockito.spy(delegate);
  chore.cleanersChain.set(0,spy);
  final Path parent=new Path(testDir,"parent");
  Path file=new Path(parent,"someFile");
  fs.mkdirs(parent);
  fs.create(file).close();
  assertTrue("Test file didn't get created.",fs.exists(file));
  final Path racyFile=new Path(parent,"addedFile");
  // During the deletability check of "someFile", race in a sibling file, then
  // delegate to the real (always-true) isFileDeletable.
  Mockito.doAnswer(new Answer(){
    @Override public Boolean answer( InvocationOnMock invocation) throws Throwable {
      fs.create(racyFile).close();
      FSUtils.logFileSystemState(fs,testDir,LOG);
      return (Boolean)invocation.callRealMethod();
    }
  } ).when(spy).isFileDeletable(Mockito.any(FileStatus.class));
  // The directory delete must report failure because a child appeared mid-iteration.
  if (chore.checkAndDeleteDirectory(parent)) {
    throw new Exception("Reported success deleting directory, should have failed when adding file mid-iteration");
  }
  // The raced-in file and its directory survive; the original file is gone.
  assertTrue("Added file unexpectedly deleted",fs.exists(racyFile));
  assertTrue("Parent directory deleted unexpectedly",fs.exists(parent));
  assertFalse("Original file unexpectedly retained",fs.exists(file));
  Mockito.verify(spy,Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class));
}


Class: org.apache.hadoop.hbase.master.cleaner.TestHFileCleaner

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * End-to-end TTL cleaning: 31 archived hfiles older than the TTL are removed, one
 * recent hfile is kept (along with a non-hfile entry that the TTL cleaner skips
 * but other logic removes). The clock is pinned via an injected EnvironmentEdge.
 */
@Test(timeout=60 * 1000) public void testHFileCleaning() throws Exception {
  final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
  String prefix = "someHFileThatWouldBeAUUID";
  Configuration conf = UTIL.getConfiguration();
  // Set TTL and install only the TimeToLive cleaner.
  long ttl = 2000;
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  Path archivedHfileDir =
      new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  FileSystem fs = FileSystem.get(conf);
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
  final long createTime = System.currentTimeMillis();
  fs.delete(archivedHfileDir, true);
  fs.mkdirs(archivedHfileDir);
  // A non-hfile name: the TTL cleaner ignores it, so it gets removed by the chore.
  fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
  LOG.debug("Now is: " + createTime);
  // 31 files whose mod time is already past the TTL.
  for (int i = 1; i < 32; i++) {
    Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
    fs.createNewFile(fileName);
    fs.setTimes(fileName, createTime - ttl - 1, -1);
    LOG.debug("Creating " + getFileStats(fileName, fs));
  }
  // One file still inside the TTL window: it must survive.
  Path saved = new Path(archivedHfileDir, prefix + ".00000000000");
  fs.createNewFile(saved);
  fs.setTimes(saved, createTime - ttl / 2, -1);
  LOG.debug("Creating " + getFileStats(saved, fs));
  for (FileStatus stat : fs.listStatus(archivedHfileDir)) {
    LOG.debug(stat.getPath().toString());
  }
  assertEquals(33, fs.listStatus(archivedHfileDir).length);
  // Pin "now" to createTime so TTL arithmetic is deterministic.
  EnvironmentEdge setTime = new EnvironmentEdge() {
    @Override public long currentTime() {
      return createTime;
    }
  };
  EnvironmentEdgeManager.injectEdge(setTime);
  try {
    cleaner.chore();
    // Only the recent hfile remains.
    assertEquals(1, fs.listStatus(archivedHfileDir).length);
    for (FileStatus file : fs.listStatus(archivedHfileDir)) {
      LOG.debug("Kept hfiles: " + file.getPath().getName());
    }
  } finally {
    // FIX: always restore the real clock, even when an assertion above fails;
    // previously a failure here leaked the fake edge into subsequent tests.
    EnvironmentEdgeManager.injectEdge(originalEdge);
  }
}

InternalCallVerifier BooleanVerifier 
/**
 * A file whose modification time is older than the configured TTL must be
 * reported deletable by TimeToLiveHFileCleaner.
 */
@Test public void testTTLCleaner() throws IOException, InterruptedException {
  FileSystem fileSystem = UTIL.getDFSCluster().getFileSystem();
  Path rootDir = UTIL.getDataTestDirOnTestFS();
  Path staleFile = new Path(rootDir, "file");
  fileSystem.createNewFile(staleFile);
  long createTime = System.currentTimeMillis();
  assertTrue("Test file not created!", fileSystem.exists(staleFile));
  TimeToLiveHFileCleaner ttlCleaner = new TimeToLiveHFileCleaner();
  // Age the file just past the 100 ms TTL configured below.
  fileSystem.setTimes(staleFile, createTime - 100, -1);
  Configuration configuration = UTIL.getConfiguration();
  configuration.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
  ttlCleaner.setConf(configuration);
  assertTrue("File not set deletable - check mod time:" + getFileStats(staleFile, fileSystem)
      + " with create time:" + createTime,
      ttlCleaner.isFileDeletable(fileSystem.getFileStatus(staleFile)));
}

InternalCallVerifier BooleanVerifier 
/**
 * With no delegate cleaners configured, the chore must still prune the empty
 * table/region/family directory chain after removing the file, while the archive
 * root itself is preserved.
 */
@Test public void testRemovesEmptyDirectories() throws Exception {
  Configuration configuration = UTIL.getConfiguration();
  // No delegates: every file is fair game for deletion.
  configuration.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
  Server dummyServer = new DummyServer();
  Path archivedHfileDir =
      new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  FileSystem fileSystem = UTIL.getDFSCluster().getFileSystem();
  HFileCleaner cleaner =
      new HFileCleaner(1000, dummyServer, configuration, fileSystem, archivedHfileDir);
  // Build archive/table/region/family/file.
  Path tableDir = new Path(archivedHfileDir, "table");
  Path regionDir = new Path(tableDir, "regionsomthing");
  Path familyDir = new Path(regionDir, "fam");
  Path hfile = new Path(familyDir, "file12345");
  fileSystem.mkdirs(familyDir);
  if (!fileSystem.exists(familyDir)) {
    throw new RuntimeException("Couldn't create test family:" + familyDir);
  }
  fileSystem.create(hfile).close();
  if (!fileSystem.exists(hfile)) {
    throw new RuntimeException("Test file didn't get created:" + hfile);
  }
  cleaner.chore();
  // The whole empty chain collapses, but the archive root stays.
  assertFalse("family directory not removed for empty directory", fileSystem.exists(familyDir));
  assertFalse("region directory not removed for empty directory", fileSystem.exists(regionDir));
  assertFalse("table directory not removed for empty directory", fileSystem.exists(tableDir));
  assertTrue("archive directory", fileSystem.exists(archivedHfileDir));
}

Class: org.apache.hadoop.hbase.master.cleaner.TestHFileLinkCleaner

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * HFileLinkCleaner lifecycle: while a link to an archived hfile exists, neither
 * the back-reference nor the hfile may be deleted; once the linking table is
 * archived, first the back-reference and then (after the TTL) the hfile itself
 * are removed.
 */
@Test public void testHFileLinkCleaning() throws Exception {
  Configuration conf=TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf,TEST_UTIL.getDataTestDir());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,HFileLinkCleaner.class.getName());
  Path rootDir=FSUtils.getRootDir(conf);
  FileSystem fs=FileSystem.get(conf);
  final TableName tableName=TableName.valueOf("test-table");
  final TableName tableLinkName=TableName.valueOf("test-link");
  final String hfileName="1234567890";
  final String familyName="cf";
  HRegionInfo hri=new HRegionInfo(tableName);
  HRegionInfo hriLink=new HRegionInfo(tableLinkName);
  Path archiveDir=HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir=HFileArchiveUtil.getStoreArchivePath(conf,tableName,hri.getEncodedName(),familyName);
  // NOTE(review): archiveLinkStoreDir is computed but never used below.
  Path archiveLinkStoreDir=HFileArchiveUtil.getStoreArchivePath(conf,tableLinkName,hriLink.getEncodedName(),familyName);
  // Create the archived hfile that the link will point at.
  Path familyPath=getFamilyDirPath(archiveDir,tableName,hri.getEncodedName(),familyName);
  fs.mkdirs(familyPath);
  Path hfilePath=new Path(familyPath,hfileName);
  fs.createNewFile(hfilePath);
  // Create the link in the live (root) directory of the linking table.
  Path familyLinkPath=getFamilyDirPath(rootDir,tableLinkName,hriLink.getEncodedName(),familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf,fs,familyLinkPath,hri,hfileName);
  // Exactly one back-reference should now exist under the archived store.
  Path linkBackRefDir=HFileLink.getBackReferencesDir(archiveStoreDir,hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs=fs.listStatus(linkBackRefDir);
  assertEquals(1,backRefs.length);
  Path linkBackRef=backRefs[0].getPath();
  final long ttl=1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY,ttl);
  Server server=new DummyServer();
  HFileCleaner cleaner=new HFileCleaner(1000,server,conf,fs,archiveDir);
  // Link table still live: nothing may be cleaned.
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));
  // Archive the linking table; the back-reference becomes removable.
  fs.rename(FSUtils.getTableDir(rootDir,tableLinkName),FSUtils.getTableDir(archiveDir,tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted",fs.exists(linkBackRef));
  // After the TTL elapses, the hfile itself is removable.
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted",fs.exists(hfilePath));
  // A few more passes to let the empty directory chains collapse.
  for (int i=0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted",fs.exists(FSUtils.getTableDir(archiveDir,tableName)));
  assertFalse("Link should be deleted",fs.exists(FSUtils.getTableDir(archiveDir,tableLinkName)));
}

Class: org.apache.hadoop.hbase.master.cleaner.TestLogsCleaner

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * LogCleaner pass over the old-logs directory: of 34 files, only five survive —
 * the three WALs still referenced by replication queues plus the two files newer
 * than the TTL. A non-WAL name ("a") and all unreferenced expired WALs go.
 */
@Test public void testLogCleaning() throws Exception {
  Configuration conf=TEST_UTIL.getConfiguration();
  // TTL and replication decoration must be set before the cleaner is built.
  long ttl=10000;
  conf.setLong("hbase.master.logcleaner.ttl",ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY,HConstants.REPLICATION_ENABLE_DEFAULT);
  Replication.decorateMasterConfiguration(conf);
  Server server=new DummyServer();
  ReplicationQueues repQueues=ReplicationFactory.getReplicationQueues(server.getZooKeeper(),conf,server);
  repQueues.init(server.getServerName().toString());
  final Path oldLogDir=new Path(TEST_UTIL.getDataTestDir(),HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName=URLEncoder.encode(server.getServerName().toString(),"UTF8");
  final FileSystem fs=FileSystem.get(conf);
  long now=System.currentTimeMillis();
  fs.delete(oldLogDir,true);
  fs.mkdirs(oldLogDir);
  // A file that does not look like a WAL: the TTL log cleaner ignores its name.
  fs.createNewFile(new Path(oldLogDir,"a"));
  fs.createNewFile(new Path(oldLogDir,fakeMachineName + "." + "a"));
  System.out.println("Now is: " + now);
  // 30 WAL-named files; every tenth one is registered in a replication queue
  // and must therefore survive the cleaning even though it is expired.
  for (int i=1; i < 31; i++) {
    Path fileName=new Path(oldLogDir,fakeMachineName + "." + (now - i));
    fs.createNewFile(fileName);
    if (i % (30 / 3) == 1) {
      repQueues.addLog(fakeMachineName,fileName.getName());
      System.out.println("Replication log file: " + fileName);
    }
  }
  // Let the 30 files above age past the TTL, then add two fresh ones.
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir,fakeMachineName + "." + now));
  fs.createNewFile(new Path(oldLogDir,fakeMachineName + "." + (now + 10000)));
  for ( FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }
  assertEquals(34,fs.listStatus(oldLogDir).length);
  LogCleaner cleaner=new LogCleaner(1000,server,conf,fs,oldLogDir);
  cleaner.chore();
  // Deletion is asynchronous-ish; wait until exactly five files remain:
  // 3 replication-referenced WALs + 2 fresh WALs.
  TEST_UTIL.waitFor(1000,new Waiter.Predicate(){
    @Override public boolean evaluate() throws Exception {
      return 5 == fs.listStatus(oldLogDir).length;
    }
  } );
  for ( FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}

Class: org.apache.hadoop.hbase.master.cleaner.TestReplicationHFileCleaner

InternalCallVerifier BooleanVerifier 
/**
 * ReplicationHFileCleaner must report a file deletable while no hfile reference
 * node exists for it, and must protect the file once a reference is queued.
 */
@Test public void testIsFileDeletable() throws IOException, ReplicationException {
  Path file = new Path(root, "testIsFileDeletableWithNoHFileRefs");
  fs.createNewFile(file);
  assertTrue("Test file not created!", fs.exists(file));
  ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
  cleaner.setConf(conf);
  // No reference node yet: deletable.
  assertTrue("Cleaner should allow to delete this file as there is no hfile reference node "
      + "for it in the queue.", cleaner.isFileDeletable(fs.getFileStatus(file)));
  // Restored stripped generic parameter: the queue takes hfile names.
  List<String> files = new ArrayList<String>(1);
  files.add(file.getName());
  rq.addHFileRefs(peerId, files);
  // Reference node added: no longer deletable.
  assertFalse("Cleaner should not allow to delete this file as there is a hfile reference node "
      + "for it in the queue.", cleaner.isFileDeletable(fs.getFileStatus(file)));
}

InternalCallVerifier BooleanVerifier 
/**
 * Regression test: isFileDeletable must complete without throwing even when the
 * hfile-refs znode version changes between two reads (the mocked client returns
 * version 1, then 2). The test passes by not timing out or raising; the boolean
 * result is deliberately not asserted.
 * (Note: the "Differnt" typo is in the published method name and is kept so the
 * externally visible interface is unchanged.)
 */
@Test(timeout=15000) public void testForDifferntHFileRefsZnodeVersion() throws Exception {
  Path file = new Path(root, "testForDifferntHFileRefsZnodeVersion");
  fs.createNewFile(file);
  assertTrue("Test file not created!", fs.exists(file));
  ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
  cleaner.setConf(conf);
  // Mocked queues client reports a different znode version on each call.
  ReplicationQueuesClient replicationQueuesClient = Mockito.mock(ReplicationQueuesClient.class);
  Mockito.when(replicationQueuesClient.getHFileRefsNodeChangeVersion()).thenReturn(1, 2);
  // Inject the mock into the cleaner's private "rqc" field via reflection.
  // Restored stripped generic wildcard on Class (raw "Class" was left by extraction).
  Class<?> cleanerClass = cleaner.getClass();
  Field rqc = cleanerClass.getDeclaredField("rqc");
  rqc.setAccessible(true);
  rqc.set(cleaner, replicationQueuesClient);
  cleaner.isFileDeletable(fs.getFileStatus(file));
}

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * getDeletableFiles must return only the file with no hfile reference node:
 * of the two candidates, the one registered in the replication queue must be
 * filtered out and the other must come back deletable.
 */
@Test public void testGetDeletableFiles() throws Exception {
  Path notDeletablefile = new Path(root, "testGetDeletableFiles_1");
  fs.createNewFile(notDeletablefile);
  assertTrue("Test file not created!", fs.exists(notDeletablefile));
  Path deletablefile = new Path(root, "testGetDeletableFiles_2");
  fs.createNewFile(deletablefile);
  assertTrue("Test file not created!", fs.exists(deletablefile));
  // Restored stripped generic parameters on the candidate list.
  List<FileStatus> files = new ArrayList<FileStatus>(2);
  FileStatus f = new FileStatus();
  f.setPath(deletablefile);
  files.add(f);
  f = new FileStatus();
  f.setPath(notDeletablefile);
  files.add(f);
  // Register a reference for the file that must NOT be deletable.
  List<String> hfiles = new ArrayList<>(1);
  hfiles.add(notDeletablefile.getName());
  rq.addHFileRefs(peerId, hfiles);
  ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
  cleaner.setConf(conf);
  // FIX: the original loop never called next() on the iterator (it only
  // incremented a counter while hasNext() stayed true), so it verified nothing
  // and its "i > 2" failure branch was unreachable. Actually walk the returned
  // files and check that exactly the deletable one comes back.
  int deletableCount = 0;
  for (FileStatus deletableStatus : cleaner.getDeletableFiles(files)) {
    deletableCount++;
    assertTrue("File " + notDeletablefile
        + " should not be deletable as its hfile reference node is added, but got "
        + deletableStatus.getPath(), deletableStatus.getPath().equals(deletablefile));
  }
  if (deletableCount != 1) {
    fail("Expected exactly 1 deletable file (" + deletablefile + ") but the cleaner returned "
        + deletableCount);
  }
}

Class: org.apache.hadoop.hbase.master.cleaner.TestSnapshotFromMaster

InternalCallVerifier BooleanVerifier 
/**
 * Test that the contract from the master for checking on a snapshot are valid.
 * <p>
 * <ol>
 * <li>If a snapshot fails with an error, we expect to get the source error.</li>
 * <li>If there is no snapshot name supplied, we should get an error.</li>
 * <li>If asking about a snapshot that hasn't occurred, you should get an error.</li>
 * </ol>
 */
@Test(timeout=300000) public void testIsDoneContract() throws Exception {
  IsSnapshotDoneRequest.Builder builder = IsSnapshotDoneRequest.newBuilder();
  String snapshotName = "asyncExpectedFailureTest";
  // No snapshot in the request at all: unknown snapshot.
  SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
      UnknownSnapshotException.class);
  // A named snapshot that was never taken: still unknown.
  SnapshotDescription desc = SnapshotDescription.newBuilder().setName(snapshotName)
      .setTable(TABLE_NAME.getNameAsString()).build();
  builder.setSnapshot(desc);
  SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
      UnknownSnapshotException.class);
  // Install a mocked handler that claims the snapshot finished successfully.
  DisabledTableSnapshotHandler mockHandler = Mockito.mock(DisabledTableSnapshotHandler.class);
  Mockito.when(mockHandler.getException()).thenReturn(null);
  Mockito.when(mockHandler.getSnapshot()).thenReturn(desc);
  // FIX: use the canonical Boolean.TRUE instead of the deprecated, allocation-heavy
  // new Boolean(true) constructor.
  Mockito.when(mockHandler.isFinished()).thenReturn(Boolean.TRUE);
  Mockito.when(mockHandler.getCompletionTimestamp())
      .thenReturn(EnvironmentEdgeManager.currentTime());
  master.getSnapshotManagerForTesting().setSnapshotHandlerForTesting(TABLE_NAME, mockHandler);
  // Empty request again: still an error even with a handler installed.
  builder = IsSnapshotDoneRequest.newBuilder();
  SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
      UnknownSnapshotException.class);
  // Asking about the handler's snapshot: reported done.
  builder.setSnapshot(desc);
  IsSnapshotDoneResponse response =
      master.getMasterRpcServices().isSnapshotDone(null, builder.build());
  assertTrue("Snapshot didn't complete when it should have.", response.getDone());
  // A name the handler doesn't know: unknown snapshot.
  builder.setSnapshot(SnapshotDescription.newBuilder().setName("Not A Snapshot").build());
  SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
      UnknownSnapshotException.class);
  // A snapshot whose completed descriptor exists on disk: reported done.
  snapshotName = "completed";
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  desc = desc.toBuilder().setName(snapshotName).build();
  SnapshotDescriptionUtils.writeSnapshotInfo(desc, snapshotDir, fs);
  builder.setSnapshot(desc);
  response = master.getMasterRpcServices().isSnapshotDone(null, builder.build());
  assertTrue("Completed, on-disk snapshot not found", response.getDone());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises the master RPC that lists completed snapshots: the returned count
 * and descriptors must track the snapshot-info files written under the
 * completed-snapshot directory.
 */
@Test(timeout = 300000)
public void testGetCompletedSnapshots() throws Exception {
  GetCompletedSnapshotsRequest listRequest = GetCompletedSnapshotsRequest.newBuilder().build();
  // Initially no snapshots exist.
  GetCompletedSnapshotsResponse listResponse =
    master.getMasterRpcServices().getCompletedSnapshots(null, listRequest);
  assertEquals("Found unexpected number of snapshots", 0, listResponse.getSnapshotsCount());
  // Write one completed snapshot and expect to see exactly it.
  String firstName = "completed";
  Path firstDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(firstName, rootDir);
  SnapshotDescription firstSnapshot = SnapshotDescription.newBuilder().setName(firstName).build();
  SnapshotDescriptionUtils.writeSnapshotInfo(firstSnapshot, firstDir, fs);
  listResponse = master.getMasterRpcServices().getCompletedSnapshots(null, listRequest);
  assertEquals("Found unexpected number of snapshots", 1, listResponse.getSnapshotsCount());
  List returned = listResponse.getSnapshotsList();
  List expectedSnapshots = Lists.newArrayList(firstSnapshot);
  assertEquals("Returned snapshots don't match created snapshots", expectedSnapshots, returned);
  // Write a second snapshot and expect both back.
  String secondName = "completed_two";
  Path secondDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(secondName, rootDir);
  SnapshotDescription secondSnapshot =
    SnapshotDescription.newBuilder().setName(secondName).build();
  SnapshotDescriptionUtils.writeSnapshotInfo(secondSnapshot, secondDir, fs);
  expectedSnapshots.add(secondSnapshot);
  listResponse = master.getMasterRpcServices().getCompletedSnapshots(null, listRequest);
  assertEquals("Found unexpected number of snapshots", 2, listResponse.getSnapshotsCount());
  returned = listResponse.getSnapshotsList();
  assertEquals("Returned snapshots don't match created snapshots", expectedSnapshots, returned);
}

Class: org.apache.hadoop.hbase.master.handler.TestCreateTableHandler

InternalCallVerifier BooleanVerifier 
/**
 * Master restart after the table's enabling node is created: process() fails
 * mid-way (throwException=true), the active master is aborted and a new one
 * started; the cluster must come back with exactly one live master.
 */
@Test(timeout = 60000)
public void testMasterRestartAfterEnablingNodeIsCreated() throws Exception {
  byte[] tableName = Bytes.toBytes("testMasterRestartAfterEnablingNodeIsCreated");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  final HRegionInfo[] hRegionInfos =
    new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null, null) };
  CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
    desc, cluster.getConfiguration(), hRegionInfos, m);
  handler.prepare();
  throwException = true; // make process() fail mid-way
  handler.process();
  abortAndStartNewMaster(cluster);
  // assertEquals reports the actual count on failure, unlike assertTrue(size() == 1).
  assertEquals(1, cluster.getLiveMasterThreads().size());
}

InternalCallVerifier BooleanVerifier 
/**
 * Creates a table whose initial region set already contains an offline split
 * parent plus its two daughters; the handler must bring the daughters online
 * and leave the parent in SPLIT state.
 */
@Test(timeout = 300000)
public void testCreateTableWithSplitRegion() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateTableWithSplitRegion");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  byte[] splitPoint = Bytes.toBytes("split-point");
  long ts = System.currentTimeMillis();
  HRegionInfo d1 = new HRegionInfo(desc.getTableName(), null, splitPoint, false, ts);
  HRegionInfo d2 = new HRegionInfo(desc.getTableName(), splitPoint, null, false, ts + 1);
  HRegionInfo parent = new HRegionInfo(desc.getTableName(), null, null, true, ts + 2);
  parent.setOffline(true);
  Path tempdir = m.getMasterFileSystem().getTempDir();
  FileSystem fs = m.getMasterFileSystem().getFileSystem();
  Path tempTableDir = FSUtils.getTableDir(tempdir, desc.getTableName());
  fs.delete(tempTableDir, true); // start from a clean temp dir
  final HRegionInfo[] hRegionInfos = new HRegionInfo[] { d1, d2, parent };
  CreateTableHandler handler = new CreateTableHandler(m, m.getMasterFileSystem(), desc,
    cluster.getConfiguration(), hRegionInfos, m);
  handler.prepare();
  handler.process();
  // Poll for availability; bail out as soon as the table shows up. The
  // original loop kept iterating all 100 times even after success.
  for (int i = 0; i < 100; i++) {
    if (TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName)) {
      break;
    }
    Thread.sleep(300);
  }
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(tableName));
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName));
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName, new byte[][] { splitPoint }));
  RegionStates regionStates = m.getAssignmentManager().getRegionStates();
  assertTrue("Parent should be in SPLIT state",
    regionStates.isRegionInState(parent, State.SPLIT));
}

InternalCallVerifier BooleanVerifier 
/**
 * Submits the same create-table twice: the first attempt fails mid-process
 * (throwException=true); a second handler for the same table must be able to
 * finish the creation and leave the table enabled.
 */
@Test(timeout = 300000)
public void testCreateTableCalledTwiceAndFirstOneInProgress() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateTableCalledTwiceAndFirstOneInProgress");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  final HRegionInfo[] hRegionInfos =
    new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null, null) };
  CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
    desc, cluster.getConfiguration(), hRegionInfos, m);
  handler.prepare();
  throwException = true; // first attempt dies mid-way
  handler.process();
  throwException = false;
  CustomCreateTableHandler handler1 = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
    desc, cluster.getConfiguration(), hRegionInfos, m);
  handler1.prepare();
  handler1.process();
  // Poll for availability; exit early once the table is up. The original
  // spun through all 100 iterations regardless of success.
  for (int i = 0; i < 100; i++) {
    if (TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName)) {
      break;
    }
    Thread.sleep(200);
  }
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(tableName));
}

Class: org.apache.hadoop.hbase.master.handler.TestEnableTableHandler

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Enables a table while the cluster has no live region servers: the enable
 * must still mark the table enabled, and once a new region server starts the
 * table's single region must come online on it.
 * NOTE(review): relies on the explicit assign() calls plus
 * waitUntilAllRegionsAssigned to converge after the new server starts —
 * confirm this holds for the restarted-server case.
 */
@Test(timeout=300000) public void testEnableTableWithNoRegionServers() throws Exception { final TableName tableName=TableName.valueOf("testEnableTableWithNoRegionServers"); final MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); final HMaster m=cluster.getMaster(); final Admin admin=TEST_UTIL.getHBaseAdmin(); final HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(FAMILYNAME)); admin.createTable(desc); admin.disableTable(tableName); TEST_UTIL.waitTableDisabled(tableName.getName()); admin.enableTable(tableName); TEST_UTIL.waitTableEnabled(tableName); admin.disableTable(tableName); TEST_UTIL.waitUntilNoRegionsInTransition(60000); JVMClusterUtil.RegionServerThread rs=cluster.getRegionServerThreads().get(0); rs.getRegionServer().stop("stop"); cluster.waitForRegionServerToStop(rs.getRegionServer().getServerName(),10000); LOG.debug("Now enabling table " + tableName); admin.enableTable(tableName); assertTrue(admin.isTableEnabled(tableName)); JVMClusterUtil.RegionServerThread rs2=cluster.startRegionServer(); cluster.waitForRegionServerToStart(rs2.getRegionServer().getServerName().getHostname(),rs2.getRegionServer().getServerName().getPort(),60000); List regions=TEST_UTIL.getHBaseAdmin().getTableRegions(tableName); assertEquals(1,regions.size()); for ( HRegionInfo region : regions) { TEST_UTIL.getHBaseAdmin().assign(region.getEncodedNameAsBytes()); } LOG.debug("Waiting for table assigned " + tableName); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); List onlineRegions=admin.getOnlineRegions(rs2.getRegionServer().getServerName()); ArrayList tableRegions=filterTableRegions(tableName,onlineRegions); assertEquals(1,tableRegions.size()); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Mangles one hbase:meta row (drops its hregioninfo column), then deletes the
// table and verifies no rows for it remain in meta (HBASE-12980 regression).
// NOTE(review): the catch blocks call e.printStackTrace() before fail(...);
// consider a fail variant that carries the cause so it survives in reports.
/** * We were only clearing rows that had a hregioninfo column in hbase:meta. Mangled rows that * were missing the hregioninfo because of error were being left behind messing up any * subsequent table made with the same name. HBASE-12980 * @throws IOException * @throws InterruptedException */ @Test(timeout=60000) public void testDeleteForSureClearsAllTableRowsFromMeta() throws IOException, InterruptedException { final TableName tableName=TableName.valueOf("testDeleteForSureClearsAllTableRowsFromMeta"); final Admin admin=TEST_UTIL.getHBaseAdmin(); final HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(FAMILYNAME)); try { createTable(TEST_UTIL,desc,HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); } catch ( Exception e) { e.printStackTrace(); fail("Got an exception while creating " + tableName); } try (Table metaTable=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)){ try (ResultScanner scanner=metaTable.getScanner(MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(),tableName))){ for ( Result result : scanner) { Delete d=new Delete(result.getRow()); d.addColumn(HConstants.CATALOG_FAMILY,HConstants.REGIONINFO_QUALIFIER); LOG.info("Mangled: " + d); metaTable.delete(d); break; } } admin.disableTable(tableName); TEST_UTIL.waitTableDisabled(tableName.getName()); try { deleteTable(TEST_UTIL,tableName); } catch ( Exception e) { e.printStackTrace(); fail("Got an exception while deleting " + tableName); } int rowCount=0; try (ResultScanner scanner=metaTable.getScanner(MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(),tableName))){ for ( Result result : scanner) { LOG.info("Found when none expected: " + result); rowCount++; } } assertEquals(0,rowCount); } }

Class: org.apache.hadoop.hbase.master.handler.TestTableDeleteFamilyHandler

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Deletes column family "cf1" once (must succeed and remove its directories
 * from every region) and then a second time (must fail with
 * InvalidFamilyOperationException).
 */
@Test
public void deleteColumnFamilyTwice() throws Exception {
  Admin admin = TEST_UTIL.getHBaseAdmin();
  HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME);
  final String cfToDelete = "cf1";
  FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
  assertTrue(admin.isTableAvailable(TABLENAME));
  // The family must exist in the descriptor before deletion.
  // (primitive boolean instead of the boxed Boolean the original used)
  boolean foundCF = false;
  for (HColumnDescriptor family : beforehtd.getColumnFamilies()) {
    if (family.getNameAsString().equals(cfToDelete)) {
      foundCF = true;
      break;
    }
  }
  assertTrue(foundCF);
  // The family directory must exist on disk in at least one region.
  Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME);
  assertTrue(fs.exists(tableDir));
  foundCF = false;
  for (FileStatus regionDir : fs.listStatus(tableDir)) {
    if (!regionDir.isDirectory()) {
      continue;
    }
    FileStatus[] cfDirs = fs.listStatus(regionDir.getPath(), new PathFilter() {
      @Override
      public boolean accept(Path p) {
        // recovered-edits is not a column family directory
        return !p.getName().contains(HConstants.RECOVERED_EDITS_DIR);
      }
    });
    for (FileStatus cfDir : cfDirs) {
      if (cfDir.isDirectory() && cfDir.getPath().getName().equals(cfToDelete)) {
        foundCF = true;
        break;
      }
    }
    if (foundCF) {
      break;
    }
  }
  assertTrue(foundCF);
  if (admin.isTableEnabled(TABLENAME)) {
    admin.disableTable(TABLENAME);
  }
  admin.deleteColumnFamily(TABLENAME, Bytes.toBytes(cfToDelete));
  // After deletion no region may still contain a cf1 directory.
  for (FileStatus regionDir : fs.listStatus(tableDir)) {
    if (!regionDir.isDirectory()) {
      continue;
    }
    FileStatus[] cfDirs = fs.listStatus(regionDir.getPath(), new PathFilter() {
      @Override
      public boolean accept(Path p) {
        // sequence-id marker files are not column family directories
        return !WALSplitter.isSequenceIdFile(p);
      }
    });
    for (FileStatus cfDir : cfDirs) {
      if (cfDir.isDirectory()) {
        assertFalse(cfDir.getPath().getName().equals(cfToDelete));
      }
    }
  }
  // Deleting the same family again must fail.
  try {
    admin.deleteColumnFamily(TABLENAME, Bytes.toBytes(cfToDelete));
    Assert.fail("Delete a non-exist column family should fail");
  } catch (InvalidFamilyOperationException e) {
    // expected
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Deletes "cf2" from a table with multiple regions and verifies both the
 * table descriptor and every region's on-disk layout reflect the removal.
 */
@Test
public void deleteColumnFamilyWithMultipleRegions() throws Exception {
  Admin admin = TEST_UTIL.getHBaseAdmin();
  HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME);
  FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
  assertTrue(admin.isTableAvailable(TABLENAME));
  // Before: exactly cf1, cf2, cf3 in the descriptor.
  assertEquals(3, beforehtd.getColumnFamilies().length);
  HColumnDescriptor[] families = beforehtd.getColumnFamilies();
  for (int i = 0; i < families.length; i++) {
    assertTrue(families[i].getNameAsString().equals("cf" + (i + 1)));
  }
  // Before: each region directory contains cf1..cf3 (ignoring recovered.edits).
  Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME);
  assertTrue(fs.exists(tableDir));
  for (FileStatus regionDir : fs.listStatus(tableDir)) {
    if (!regionDir.isDirectory()) {
      continue;
    }
    FileStatus[] cfDirs = fs.listStatus(regionDir.getPath(), new PathFilter() {
      @Override
      public boolean accept(Path p) {
        return !p.getName().contains(HConstants.RECOVERED_EDITS_DIR);
      }
    });
    int expectedIndex = 1;
    for (FileStatus cfDir : cfDirs) {
      if (cfDir.isDirectory() && !cfDir.getPath().getName().startsWith(".")) {
        // Expected value first, actual second — the original had the
        // arguments swapped, yielding a misleading message on failure.
        assertEquals("cf" + expectedIndex, cfDir.getPath().getName());
        expectedIndex++;
      }
    }
  }
  admin.disableTable(TABLENAME);
  admin.deleteColumnFamily(TABLENAME, Bytes.toBytes("cf2"));
  // After: the descriptor holds only cf1 and cf3.
  HTableDescriptor afterhtd = admin.getTableDescriptor(TABLENAME);
  assertEquals(2, afterhtd.getColumnFamilies().length);
  HColumnDescriptor[] newFamilies = afterhtd.getColumnFamilies();
  assertTrue(newFamilies[0].getNameAsString().equals("cf1"));
  assertTrue(newFamilies[1].getNameAsString().equals("cf3"));
  // After: no region directory may still contain a cf2 directory.
  for (FileStatus regionDir : fs.listStatus(tableDir)) {
    if (!regionDir.isDirectory()) {
      continue;
    }
    FileStatus[] cfDirs = fs.listStatus(regionDir.getPath(), new PathFilter() {
      @Override
      public boolean accept(Path p) {
        return !WALSplitter.isSequenceIdFile(p);
      }
    });
    for (FileStatus cfDir : cfDirs) {
      if (cfDir.isDirectory()) {
        assertFalse(cfDir.getPath().getName().equals("cf2"));
      }
    }
  }
}

Class: org.apache.hadoop.hbase.master.handler.TestTableDescriptorModification

InternalCallVerifier BooleanVerifier 
/**
 * Modifies a column family's block size and verifies the change is visible in
 * the table descriptor afterwards.
 */
@Test
public void testModifyColumnFamily() throws IOException {
  Admin admin = TEST_UTIL.getHBaseAdmin();
  HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_0);
  int blockSize = cfDescriptor.getBlocksize();
  HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
  baseHtd.addFamily(cfDescriptor);
  admin.createTable(baseHtd);
  admin.disableTable(TABLE_NAME);
  try {
    verifyTableDescriptor(TABLE_NAME, FAMILY_0);
    int newBlockSize = 2 * blockSize;
    cfDescriptor.setBlocksize(newBlockSize);
    admin.modifyColumnFamily(TABLE_NAME, cfDescriptor);
    HTableDescriptor htd = admin.getTableDescriptor(TABLE_NAME);
    HColumnDescriptor hcfd = htd.getFamily(FAMILY_0);
    // assertEquals reports both values on failure, unlike assertTrue(a == b).
    assertEquals(newBlockSize, hcfd.getBlocksize());
  } finally {
    admin.deleteTable(TABLE_NAME);
  }
}

Class: org.apache.hadoop.hbase.master.normalizer.TestSimpleRegionNormalizer

InternalCallVerifier BooleanVerifier 
/**
 * Regions already near the table's average size must yield no normalization
 * plan at all.
 */
@Test
public void testNoNormalizationOnNormalizedCluster() throws HBaseIOException {
  TableName testTable = TableName.valueOf("testSplitOfSmallRegion");
  List hris = new ArrayList<>();
  Map regionSizes = new HashMap<>();
  HRegionInfo hri1 = new HRegionInfo(testTable, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
  hris.add(hri1);
  regionSizes.put(hri1.getRegionName(), 10);
  HRegionInfo hri2 = new HRegionInfo(testTable, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
  hris.add(hri2);
  regionSizes.put(hri2.getRegionName(), 15);
  HRegionInfo hri3 = new HRegionInfo(testTable, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
  hris.add(hri3);
  regionSizes.put(hri3.getRegionName(), 8);
  HRegionInfo hri4 = new HRegionInfo(testTable, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
  hris.add(hri4);
  regionSizes.put(hri4.getRegionName(), 10);
  setupMocksForNormalizer(regionSizes, hris);
  List plans = normalizer.computePlanForTable(testTable);
  // assertNull gives a clearer failure message than assertTrue(plans == null).
  assertNull(plans);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * One region (size 30) far above the table average triggers a split plan
 * targeting exactly that region.
 */
@Test
public void testSplitOfLargeRegion() throws HBaseIOException {
  TableName tableName = TableName.valueOf("testSplitOfLargeRegion");
  List regionInfos = new ArrayList<>();
  Map sizes = new HashMap<>();
  HRegionInfo small1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
  regionInfos.add(small1);
  sizes.put(small1.getRegionName(), 8);
  HRegionInfo small2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
  regionInfos.add(small2);
  sizes.put(small2.getRegionName(), 6);
  HRegionInfo medium = new HRegionInfo(tableName, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
  regionInfos.add(medium);
  sizes.put(medium.getRegionName(), 10);
  HRegionInfo large = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
  regionInfos.add(large);
  sizes.put(large.getRegionName(), 30);
  setupMocksForNormalizer(sizes, regionInfos);
  List plans = normalizer.computePlanForTable(tableName);
  NormalizationPlan firstPlan = plans.get(0);
  assertTrue(firstPlan instanceof SplitNormalizationPlan);
  assertEquals(large, ((SplitNormalizationPlan) firstPlan).getRegionInfo());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * With several candidate regions, the normalizer's first plan must merge the
 * adjacent small pair at the end of the key space (r5, r6).
 */
@Test
public void testMergeOfSecondSmallestRegions() throws HBaseIOException {
  TableName tableName = TableName.valueOf("testMergeOfSmallRegions");
  List regionInfos = new ArrayList<>();
  Map sizes = new HashMap<>();
  HRegionInfo r1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
  regionInfos.add(r1);
  sizes.put(r1.getRegionName(), 1);
  HRegionInfo r2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
  regionInfos.add(r2);
  sizes.put(r2.getRegionName(), 10000);
  HRegionInfo r3 = new HRegionInfo(tableName, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
  regionInfos.add(r3);
  sizes.put(r3.getRegionName(), 10000);
  HRegionInfo r4 = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
  regionInfos.add(r4);
  sizes.put(r4.getRegionName(), 10000);
  HRegionInfo r5 = new HRegionInfo(tableName, Bytes.toBytes("eee"), Bytes.toBytes("fff"));
  regionInfos.add(r5);
  sizes.put(r5.getRegionName(), 2700);
  HRegionInfo r6 = new HRegionInfo(tableName, Bytes.toBytes("fff"), Bytes.toBytes("ggg"));
  regionInfos.add(r6);
  sizes.put(r6.getRegionName(), 2700);
  setupMocksForNormalizer(sizes, regionInfos);
  List plans = normalizer.computePlanForTable(tableName);
  NormalizationPlan firstPlan = plans.get(0);
  assertTrue(firstPlan instanceof MergeNormalizationPlan);
  assertEquals(r5, ((MergeNormalizationPlan) firstPlan).getFirstRegion());
  assertEquals(r6, ((MergeNormalizationPlan) firstPlan).getSecondRegion());
}

InternalCallVerifier BooleanVerifier 
// Small regions that are NOT adjacent must not be merged; expects no plan.
// NOTE(review): hri5 is constructed with the same key range as hri4
// ("ddd"->"eee") and the list adds hri4 a second time (`hris.add(hri4)`)
// while hri5 is never added — this looks like a copy-paste slip. Confirm the
// intended fifth region before changing it; the test still asserts no plan.
@Test public void testMergeOfSmallNonAdjacentRegions() throws HBaseIOException { TableName testTable=TableName.valueOf("testMergeOfSmallRegions"); List hris=new ArrayList<>(); Map regionSizes=new HashMap<>(); HRegionInfo hri1=new HRegionInfo(testTable,Bytes.toBytes("aaa"),Bytes.toBytes("bbb")); hris.add(hri1); regionSizes.put(hri1.getRegionName(),15); HRegionInfo hri2=new HRegionInfo(testTable,Bytes.toBytes("bbb"),Bytes.toBytes("ccc")); hris.add(hri2); regionSizes.put(hri2.getRegionName(),5); HRegionInfo hri3=new HRegionInfo(testTable,Bytes.toBytes("ccc"),Bytes.toBytes("ddd")); hris.add(hri3); regionSizes.put(hri3.getRegionName(),16); HRegionInfo hri4=new HRegionInfo(testTable,Bytes.toBytes("ddd"),Bytes.toBytes("eee")); hris.add(hri4); regionSizes.put(hri4.getRegionName(),15); HRegionInfo hri5=new HRegionInfo(testTable,Bytes.toBytes("ddd"),Bytes.toBytes("eee")); hris.add(hri4); regionSizes.put(hri5.getRegionName(),5); setupMocksForNormalizer(regionSizes,hris); List plans=normalizer.computePlanForTable(testTable); assertTrue(plans == null); }

InternalCallVerifier BooleanVerifier 
/** The meta table must never be normalized; expect no plan. */
@Test
public void testNoNormalizationForMetaTable() throws HBaseIOException {
  TableName testTable = TableName.META_TABLE_NAME;
  List hris = new ArrayList<>();
  Map regionSizes = new HashMap<>();
  setupMocksForNormalizer(regionSizes, hris);
  List plans = normalizer.computePlanForTable(testTable);
  // assertNull gives a clearer failure message than assertTrue(plans == null).
  assertNull(plans);
}

InternalCallVerifier BooleanVerifier 
/**
 * Normalization is skipped when a table has too few regions (here: two);
 * expect no plan.
 */
@Test
public void testNoNormalizationIfTooFewRegions() throws HBaseIOException {
  TableName testTable = TableName.valueOf("testSplitOfSmallRegion");
  List hris = new ArrayList<>();
  Map regionSizes = new HashMap<>();
  HRegionInfo hri1 = new HRegionInfo(testTable, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
  hris.add(hri1);
  regionSizes.put(hri1.getRegionName(), 10);
  HRegionInfo hri2 = new HRegionInfo(testTable, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
  hris.add(hri2);
  regionSizes.put(hri2.getRegionName(), 15);
  setupMocksForNormalizer(regionSizes, hris);
  List plans = normalizer.computePlanForTable(testTable);
  // assertNull gives a clearer failure message than assertTrue(plans == null).
  assertNull(plans);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Two adjacent regions (r2, r3) well below the table's average size must be
 * selected by the first plan as a merge pair.
 */
@Test
public void testMergeOfSmallRegions() throws HBaseIOException {
  TableName tableName = TableName.valueOf("testMergeOfSmallRegions");
  List regionInfos = new ArrayList<>();
  Map sizes = new HashMap<>();
  HRegionInfo r1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
  regionInfos.add(r1);
  sizes.put(r1.getRegionName(), 15);
  HRegionInfo r2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
  regionInfos.add(r2);
  sizes.put(r2.getRegionName(), 5);
  HRegionInfo r3 = new HRegionInfo(tableName, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
  regionInfos.add(r3);
  sizes.put(r3.getRegionName(), 5);
  HRegionInfo r4 = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
  regionInfos.add(r4);
  sizes.put(r4.getRegionName(), 15);
  HRegionInfo r5 = new HRegionInfo(tableName, Bytes.toBytes("eee"), Bytes.toBytes("fff"));
  regionInfos.add(r5);
  sizes.put(r5.getRegionName(), 16);
  setupMocksForNormalizer(sizes, regionInfos);
  List plans = normalizer.computePlanForTable(tableName);
  NormalizationPlan firstPlan = plans.get(0);
  assertTrue(firstPlan instanceof MergeNormalizationPlan);
  assertEquals(r2, ((MergeNormalizationPlan) firstPlan).getFirstRegion());
  assertEquals(r3, ((MergeNormalizationPlan) firstPlan).getSecondRegion());
}

Class: org.apache.hadoop.hbase.master.normalizer.TestSimpleRegionNormalizerOnCluster

InternalCallVerifier EqualityVerifier 
/**
 * End-to-end normalization merge: creates a 5-region table, loads skewed data
 * per region, enables normalization on the table, triggers the master's
 * normalizer, and waits until the region count drops from 5 to 4.
 * NOTE(review): uses a fixed Thread.sleep(5000) before normalizeRegions() and
 * a wait loop bounded only by the 60s test timeout — confirm stability on
 * slow machines. `admin` here is a field of the test class, not a local.
 */
@Test(timeout=60000) @SuppressWarnings("deprecation") public void testRegionNormalizationMergeOnCluster() throws Exception { final TableName TABLENAME=TableName.valueOf("testRegionNormalizationMergeOnCluster"); MiniHBaseCluster cluster=TEST_UTIL.getHBaseCluster(); HMaster m=cluster.getMaster(); try (HTable ht=TEST_UTIL.createMultiRegionTable(TABLENAME,FAMILYNAME,5)){ List generatedRegions=TEST_UTIL.getHBaseCluster().getRegions(TABLENAME); Collections.sort(generatedRegions,new Comparator(){ @Override public int compare( HRegion o1, HRegion o2){ return o1.getRegionInfo().compareTo(o2.getRegionInfo()); } } ); HRegion region=generatedRegions.get(0); generateTestData(region,1); region.flush(true); region=generatedRegions.get(1); generateTestData(region,1); region.flush(true); region=generatedRegions.get(2); generateTestData(region,3); region.flush(true); region=generatedRegions.get(3); generateTestData(region,3); region.flush(true); region=generatedRegions.get(4); generateTestData(region,5); region.flush(true); } HTableDescriptor htd=admin.getTableDescriptor(TABLENAME); htd.setNormalizationEnabled(true); admin.modifyTable(TABLENAME,htd); admin.flush(TABLENAME); assertEquals(5,MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(),TABLENAME)); Thread.sleep(5000); m.normalizeRegions(); while (MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(),TABLENAME) > 4) { LOG.info("Waiting for normalization merge to complete"); Thread.sleep(100); } assertEquals(4,MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(),TABLENAME)); admin.disableTable(TABLENAME); admin.deleteTable(TABLENAME); }

Class: org.apache.hadoop.hbase.master.procedure.TestAddColumnFamilyProcedure

InternalCallVerifier BooleanVerifier 
/**
 * Submitting the same add-column-family procedure twice with the same nonce
 * must collapse into a single procedure: both submissions return the same id.
 */
@Test(timeout = 60000)
public void testAddSameColumnFamilyTwiceWithSameNonce() throws Exception {
  final TableName tableName = TableName.valueOf("testAddSameColumnFamilyTwiceWithSameNonce");
  final String cf2 = "cf2";
  final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf2);
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1");
  long procId1 = procExec.submitProcedure(
    new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor),
    nonceGroup, nonce);
  long procId2 = procExec.submitProcedure(
    new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor),
    nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
  MasterProcedureTestingUtility.validateColumnFamilyAddition(
    UTIL.getHBaseCluster().getMaster(), tableName, cf2);
  ProcedureTestingUtility.waitProcedure(procExec, procId2);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
  // assertEquals reports both ids on failure, unlike assertTrue(a == b).
  assertEquals(procId1, procId2);
}

InternalCallVerifier BooleanVerifier 
/**
 * Adding an already-existing column family must fail with
 * InvalidFamilyOperationException, both while the table is enabled and again
 * after the table has been disabled.
 */
@Test(timeout=60000) public void testAddSameColumnFamilyTwice() throws Exception { final TableName tableName=TableName.valueOf("testAddColumnFamilyTwice"); final String cf2="cf2"; final HColumnDescriptor columnDescriptor=new HColumnDescriptor(cf2); final ProcedureExecutor procExec=getMasterProcedureExecutor(); MasterProcedureTestingUtility.createTable(procExec,tableName,null,"f1"); long procId1=procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(),tableName,columnDescriptor),nonceGroup,nonce); ProcedureTestingUtility.waitProcedure(procExec,procId1); ProcedureTestingUtility.assertProcNotFailed(procExec,procId1); MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(),tableName,cf2); long procId2=procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(),tableName,columnDescriptor),nonceGroup + 1,nonce + 1); ProcedureTestingUtility.waitProcedure(procExec,procId2); ProcedureInfo result=procExec.getResult(procId2); assertTrue(result.isFailed()); LOG.debug("Add failed with exception: " + result.getExceptionFullMessage()); assertTrue(ProcedureTestingUtility.getExceptionCause(result) instanceof InvalidFamilyOperationException); UTIL.getHBaseAdmin().disableTable(tableName); long procId3=procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(),tableName,columnDescriptor),nonceGroup + 2,nonce + 2); ProcedureTestingUtility.waitProcedure(procExec,procId3); result=procExec.getResult(procId3); assertTrue(result.isFailed()); LOG.debug("Add failed with exception: " + result.getExceptionFullMessage()); assertTrue(ProcedureTestingUtility.getExceptionCause(result) instanceof InvalidFamilyOperationException); }

Class: org.apache.hadoop.hbase.master.procedure.TestCreateNamespaceProcedure

InternalCallVerifier BooleanVerifier 
/**
 * Creating the same namespace twice with different nonces: the second attempt
 * must fail with NamespaceExistException.
 */
@Test(timeout = 60000)
public void testCreateSameNamespaceTwice() throws Exception {
  final NamespaceDescriptor nsd =
    NamespaceDescriptor.create("testCreateSameNamespaceTwice").build();
  final ProcedureExecutor executor = getMasterProcedureExecutor();
  long firstProcId = executor.submitProcedure(
    new CreateNamespaceProcedure(executor.getEnvironment(), nsd), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(executor, firstProcId);
  ProcedureTestingUtility.assertProcNotFailed(executor, firstProcId);
  long secondProcId = executor.submitProcedure(
    new CreateNamespaceProcedure(executor.getEnvironment(), nsd), nonceGroup + 1, nonce + 1);
  ProcedureTestingUtility.waitProcedure(executor, secondProcId);
  ProcedureInfo secondResult = executor.getResult(secondProcId);
  assertTrue(secondResult.isFailed());
  LOG.debug("Create namespace failed with exception: "
    + secondResult.getExceptionFullMessage());
  assertTrue(ProcedureTestingUtility.getExceptionCause(secondResult)
    instanceof NamespaceExistException);
}

InternalCallVerifier BooleanVerifier 
/**
 * Creating the same namespace twice with the same nonce must collapse into a
 * single procedure; both submissions return the same procedure id.
 */
@Test(timeout = 60000)
public void testCreateSameNamespaceTwiceWithSameNonce() throws Exception {
  final NamespaceDescriptor nsd =
    NamespaceDescriptor.create("testCreateSameNamespaceTwiceWithSameNonce").build();
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  long procId1 = procExec.submitProcedure(
    new CreateNamespaceProcedure(procExec.getEnvironment(), nsd), nonceGroup, nonce);
  long procId2 = procExec.submitProcedure(
    new CreateNamespaceProcedure(procExec.getEnvironment(), nsd), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
  validateNamespaceCreated(nsd);
  ProcedureTestingUtility.waitProcedure(procExec, procId2);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
  // assertEquals reports both ids on failure, unlike assertTrue(a == b).
  assertEquals(procId1, procId2);
}

InternalCallVerifier BooleanVerifier 
/**
 * A namespace configured with a negative maxregions quota must be rejected
 * with a ConstraintException.
 */
@Test(timeout = 60000)
public void testCreateNamespaceWithInvalidRegionCount() throws Exception {
  final NamespaceDescriptor nsd =
    NamespaceDescriptor.create("testCreateNamespaceWithInvalidRegionCount").build();
  final String nsKey = "hbase.namespace.quota.maxregions";
  final String nsValue = "-1";
  final ProcedureExecutor executor = getMasterProcedureExecutor();
  nsd.setConfiguration(nsKey, nsValue);
  long procId = executor.submitProcedure(
    new CreateNamespaceProcedure(executor.getEnvironment(), nsd), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(executor, procId);
  ProcedureInfo creationResult = executor.getResult(procId);
  assertTrue(creationResult.isFailed());
  LOG.debug("Create namespace failed with exception: "
    + creationResult.getExceptionFullMessage());
  assertTrue(ProcedureTestingUtility.getExceptionCause(creationResult)
    instanceof ConstraintException);
}

InternalCallVerifier NullVerifier 
/**
 * Kills the procedure executor before each store update while creating a
 * namespace, driving the rollback path, then verifies the namespace was not
 * left behind (either a null descriptor or NamespaceNotFoundException is the
 * accepted outcome).
 */
@Test(timeout=60000) public void testRollbackAndDoubleExecution() throws Exception { final NamespaceDescriptor nsd=NamespaceDescriptor.create("testRollbackAndDoubleExecution").build(); final ProcedureExecutor procExec=getMasterProcedureExecutor(); ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec,true); long procId=procExec.submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(),nsd),nonceGroup,nonce); int numberOfSteps=CreateNamespaceState.values().length - 2; MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec,procId,numberOfSteps,CreateNamespaceState.values()); try { NamespaceDescriptor nsDescriptor=UTIL.getHBaseAdmin().getNamespaceDescriptor(nsd.getName()); assertNull(nsDescriptor); } catch ( NamespaceNotFoundException nsnfe) { LOG.info("The namespace " + nsd.getName() + " is not created."); } }

InternalCallVerifier BooleanVerifier 
/**
 * Attempting to create the pre-existing system namespace must fail with
 * NamespaceExistException.
 */
@Test(timeout = 60000)
public void testCreateSystemNamespace() throws Exception {
  final NamespaceDescriptor nsd = UTIL.getHBaseAdmin()
    .getNamespaceDescriptor(NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
  final ProcedureExecutor executor = getMasterProcedureExecutor();
  long procId = executor.submitProcedure(
    new CreateNamespaceProcedure(executor.getEnvironment(), nsd), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(executor, procId);
  ProcedureInfo creationResult = executor.getResult(procId);
  assertTrue(creationResult.isFailed());
  LOG.debug("Create namespace failed with exception: "
    + creationResult.getExceptionFullMessage());
  assertTrue(ProcedureTestingUtility.getExceptionCause(creationResult)
    instanceof NamespaceExistException);
}

InternalCallVerifier BooleanVerifier 
/**
 * A namespace configured with a negative maxtables quota must be rejected
 * with a ConstraintException.
 */
@Test(timeout = 60000)
public void testCreateNamespaceWithInvalidTableCount() throws Exception {
  final NamespaceDescriptor nsd =
    NamespaceDescriptor.create("testCreateNamespaceWithInvalidTableCount").build();
  final String nsKey = "hbase.namespace.quota.maxtables";
  final String nsValue = "-1";
  final ProcedureExecutor executor = getMasterProcedureExecutor();
  nsd.setConfiguration(nsKey, nsValue);
  long procId = executor.submitProcedure(
    new CreateNamespaceProcedure(executor.getEnvironment(), nsd), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(executor, procId);
  ProcedureInfo creationResult = executor.getResult(procId);
  assertTrue(creationResult.isFailed());
  LOG.debug("Create namespace failed with exception: "
    + creationResult.getExceptionFullMessage());
  assertTrue(ProcedureTestingUtility.getExceptionCause(creationResult)
    instanceof ConstraintException);
}

Class: org.apache.hadoop.hbase.master.procedure.TestCreateTableProcedure

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Two CreateTableProcedure submissions sharing the same nonce must be
 * collapsed into a single procedure: both complete successfully and both
 * submissions resolve to the same procedure id.
 */
@Test(timeout = 60000)
public void testCreateTwiceWithSameNonce() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateTwiceWithSameNonce");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
  final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);

  // Identical nonceGroup/nonce on both submissions triggers de-duplication.
  final long firstId = procExec.submitProcedure(
      new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);
  final long secondId = procExec.submitProcedure(
      new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  ProcedureTestingUtility.waitProcedure(procExec, firstId);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(firstId));
  ProcedureTestingUtility.waitProcedure(procExec, secondId);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(secondId));

  assertTrue(firstId == secondId);
}

Class: org.apache.hadoop.hbase.master.procedure.TestDeleteColumnFamilyProcedure

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Deletes a column family, then verifies that deleting it again fails with
 * InvalidFamilyOperationException — once with the table online and once with
 * the table offline (disabled).
 */
@Test(timeout = 60000)
public void testDeleteColumnFamilyTwice() throws Exception {
  final TableName tableName = TableName.valueOf("testDeleteColumnFamilyTwice");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  final String cf2 = "cf2";
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", cf2);

  // First delete succeeds and the family is gone.
  long procId1 = procExec.submitProcedure(
      new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf2.getBytes()),
      nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
  MasterProcedureTestingUtility.validateColumnFamilyDeletion(
      UTIL.getHBaseCluster().getMaster(), tableName, cf2);

  // Second delete (table online) must fail: the family no longer exists.
  long procId2 = procExec.submitProcedure(
      new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf2.getBytes()),
      nonceGroup + 1, nonce + 1);
  ProcedureTestingUtility.waitProcedure(procExec, procId2);
  ProcedureInfo result = procExec.getResult(procId2);
  assertTrue(result.isFailed());
  LOG.debug("Delete online failed with exception: " + result.getExceptionFullMessage());
  assertTrue(
      ProcedureTestingUtility.getExceptionCause(result) instanceof InvalidFamilyOperationException);

  // Third delete (table offline) must fail the same way.
  UTIL.getHBaseAdmin().disableTable(tableName);
  long procId3 = procExec.submitProcedure(
      new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf2.getBytes()),
      nonceGroup + 2, nonce + 2);
  ProcedureTestingUtility.waitProcedure(procExec, procId3);
  // BUGFIX: previously this fetched the result of procId2, so the offline
  // case was never actually verified.
  result = procExec.getResult(procId3);
  assertTrue(result.isFailed());
  LOG.debug("Delete offline failed with exception: " + result.getExceptionFullMessage());
  assertTrue(
      ProcedureTestingUtility.getExceptionCause(result) instanceof InvalidFamilyOperationException);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Two DeleteColumnFamilyProcedure submissions with the same nonce are
 * de-duplicated: both succeed and report the same procedure id.
 */
@Test(timeout = 60000)
public void testDeleteColumnFamilyTwiceWithSameNonce() throws Exception {
  final TableName tableName = TableName.valueOf("testDeleteColumnFamilyTwiceWithSameNonce");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  final String cf2 = "cf2";
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", cf2);

  final long firstId = procExec.submitProcedure(
      new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf2.getBytes()),
      nonceGroup, nonce);
  final long secondId = procExec.submitProcedure(
      new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf2.getBytes()),
      nonceGroup, nonce);

  ProcedureTestingUtility.waitProcedure(procExec, firstId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, firstId);
  MasterProcedureTestingUtility.validateColumnFamilyDeletion(
      UTIL.getHBaseCluster().getMaster(), tableName, cf2);

  // The second submission piggybacks on the first via the nonce.
  ProcedureTestingUtility.waitProcedure(procExec, secondId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, secondId);
  assertTrue(firstId == secondId);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Deleting a column family that was never part of the table must fail with
 * InvalidFamilyOperationException.
 */
@Test(timeout = 60000)
public void testDeleteNonExistingColumnFamily() throws Exception {
  final TableName tableName = TableName.valueOf("testDeleteNonExistingColumnFamily");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  final String cf3 = "cf3";
  // The table only has families f1 and f2 — cf3 is absent.
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");

  final long procId = procExec.submitProcedure(
      new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf3.getBytes()),
      nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId);

  final ProcedureInfo procInfo = procExec.getResult(procId);
  assertTrue(procInfo.isFailed());
  LOG.debug("Delete failed with exception: " + procInfo.getExceptionFullMessage());
  assertTrue(
      ProcedureTestingUtility.getExceptionCause(procInfo) instanceof InvalidFamilyOperationException);
}

Class: org.apache.hadoop.hbase.master.procedure.TestDeleteNamespaceProcedure

InternalCallVerifier BooleanVerifier 
/**
 * Two DeleteNamespaceProcedure submissions with the same nonce resolve to a
 * single procedure: both succeed and share one procedure id.
 */
@Test(timeout = 60000)
public void testDeleteSameNamespaceTwiceWithSameNonce() throws Exception {
  final String namespaceName = "testDeleteSameNamespaceTwiceWithSameNonce";
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  createNamespaceForTesting(namespaceName);

  final long firstId = procExec.submitProcedure(
      new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName), nonceGroup, nonce);
  final long secondId = procExec.submitProcedure(
      new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName), nonceGroup, nonce);

  ProcedureTestingUtility.waitProcedure(procExec, firstId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, firstId);
  validateNamespaceNotExist(namespaceName);

  // The duplicate submission must map to the same procedure.
  ProcedureTestingUtility.waitProcedure(procExec, secondId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, secondId);
  assertTrue(firstId == secondId);
}

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * Runs a DeleteNamespaceProcedure with kill/restart forced before every store
 * update and drives a full rollback; the namespace must still exist after.
 */
@Test(timeout = 60000)
public void testRollbackAndDoubleExecution() throws Exception {
  final String namespaceName = "testRollbackAndDoubleExecution";
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  createNamespaceForTesting(namespaceName);

  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  // Kill the executor before each store update so every step is replayed.
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  final long procId = procExec.submitProcedure(
      new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName), nonceGroup, nonce);
  final int stepCount = DeleteNamespaceState.values().length - 2;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
      procExec, procId, stepCount, DeleteNamespaceState.values());

  // Rolling back the delete leaves the namespace in place.
  final NamespaceDescriptor stillThere =
      UTIL.getHBaseAdmin().getNamespaceDescriptor(namespaceName);
  assertNotNull(stillThere);
}

InternalCallVerifier BooleanVerifier 
/**
 * Deleting a namespace that still contains a table must fail with a
 * ConstraintException.
 *
 * <p>BUGFIX: the namespace/table were previously named
 * "testDeleteNonExistNamespace" (copy-paste from another test). That collided
 * with {@code testDeleteNonExistNamespace()}, which calls
 * {@code validateNamespaceNotExist} on the very same name — so test outcome
 * depended on execution order. Use a name matching this test instead.
 */
@Test(timeout = 60000)
public void testDeleteNonEmptyNamespace() throws Exception {
  final String namespaceName = "testDeleteNonEmptyNamespace";
  final TableName tableName = TableName.valueOf("testDeleteNonEmptyNamespace:t1");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  // Create a namespace containing one table, making it non-empty.
  createNamespaceForTesting(namespaceName);
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1");

  long procId = procExec.submitProcedure(
      new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId);

  ProcedureInfo result = procExec.getResult(procId);
  assertTrue(result.isFailed());
  LOG.debug("Delete namespace failed with exception: " + result.getExceptionFullMessage());
  assertTrue(ProcedureTestingUtility.getExceptionCause(result) instanceof ConstraintException);
}

InternalCallVerifier BooleanVerifier 
/**
 * The system namespace must not be deletable; the procedure fails with a
 * ConstraintException.
 */
@Test(timeout = 60000)
public void testDeleteSystemNamespace() throws Exception {
  final String namespaceName = NamespaceDescriptor.SYSTEM_NAMESPACE.getName();
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  final long procId = procExec.submitProcedure(
      new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId);

  final ProcedureInfo procInfo = procExec.getResult(procId);
  assertTrue(procInfo.isFailed());
  LOG.debug("Delete namespace failed with exception: " + procInfo.getExceptionFullMessage());
  assertTrue(
      ProcedureTestingUtility.getExceptionCause(procInfo) instanceof ConstraintException);
}

InternalCallVerifier BooleanVerifier 
/**
 * Deleting a namespace that does not exist must fail with
 * NamespaceNotFoundException.
 */
@Test(timeout = 60000)
public void testDeleteNonExistNamespace() throws Exception {
  final String namespaceName = "testDeleteNonExistNamespace";
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  // Precondition: the namespace must be absent.
  validateNamespaceNotExist(namespaceName);

  final long procId = procExec.submitProcedure(
      new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId);

  final ProcedureInfo procInfo = procExec.getResult(procId);
  assertTrue(procInfo.isFailed());
  LOG.debug("Delete namespace failed with exception: " + procInfo.getExceptionFullMessage());
  assertTrue(
      ProcedureTestingUtility.getExceptionCause(procInfo) instanceof NamespaceNotFoundException);
}

Class: org.apache.hadoop.hbase.master.procedure.TestDeleteTableProcedure

InternalCallVerifier BooleanVerifier 
/**
 * Two DeleteTableProcedure submissions with the same nonce are collapsed:
 * both succeed and report the same procedure id.
 */
@Test(timeout = 60000)
public void testDoubleDeletedTableWithSameNonce() throws Exception {
  final TableName tableName = TableName.valueOf("testDoubleDeletedTableWithSameNonce");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  final HRegionInfo[] regions =
      MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f");
  UTIL.getHBaseAdmin().disableTable(tableName);

  final long firstId = procExec.submitProcedure(
      new DeleteTableProcedure(procExec.getEnvironment(), tableName), nonceGroup, nonce);
  final long secondId = procExec.submitProcedure(
      new DeleteTableProcedure(procExec.getEnvironment(), tableName), nonceGroup, nonce);

  ProcedureTestingUtility.waitProcedure(procExec, firstId);
  ProcedureTestingUtility.waitProcedure(procExec, secondId);

  ProcedureTestingUtility.assertProcNotFailed(procExec, firstId);
  MasterProcedureTestingUtility.validateTableDeletion(
      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f");

  // Same nonce: the second submission is the same procedure.
  ProcedureTestingUtility.assertProcNotFailed(procExec, secondId);
  assertTrue(firstId == secondId);
}

InternalCallVerifier BooleanVerifier 
/**
 * Deleting an already-deleted table (with a distinct nonce, i.e. a genuinely
 * new attempt) must fail with TableNotFoundException.
 */
@Test(timeout = 60000)
public void testDeleteDeletedTable() throws Exception {
  final TableName tableName = TableName.valueOf("testDeleteDeletedTable");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  final HRegionInfo[] regions =
      MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f");
  UTIL.getHBaseAdmin().disableTable(tableName);

  final long firstId = procExec.submitProcedure(
      new DeleteTableProcedure(procExec.getEnvironment(), tableName), nonceGroup, nonce);
  // Different nonceGroup/nonce: not de-duplicated with the first delete.
  final long secondId = procExec.submitProcedure(
      new DeleteTableProcedure(procExec.getEnvironment(), tableName),
      nonceGroup + 1, nonce + 1);

  ProcedureTestingUtility.waitProcedure(procExec, firstId);
  ProcedureTestingUtility.waitProcedure(procExec, secondId);

  ProcedureTestingUtility.assertProcNotFailed(procExec, firstId);
  MasterProcedureTestingUtility.validateTableDeletion(
      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f");

  final ProcedureInfo result = procExec.getResult(secondId);
  assertTrue(result.isFailed());
  LOG.debug("Delete failed with exception: " + result.getExceptionFullMessage());
  assertTrue(
      ProcedureTestingUtility.getExceptionCause(result) instanceof TableNotFoundException);
}

Class: org.apache.hadoop.hbase.master.procedure.TestDisableTableProcedure

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Disables "testDisableTableMultipleTimes" and then verifies three follow-up
// scenarios: (1) a plain second disable fails, with the procedure result
// carrying TableNotEnabledException; (2) a disable submitted with a
// CompatibilityLatch throws TableNotEnabledException synchronously through
// prepareLatch.await() (Assert.fail fires if it does not); (3) a disable with
// the boolean flag set to true still completes and the table remains disabled.
// NOTE(review): the semantics of the boolean passed to DisableTableProcedure
// (false in the first submissions, true in the last) are defined by that
// class — presumably a skip-state-check flag; confirm against its source.
// NOTE(review): procId3's result is intentionally never inspected; the latch
// is expected to throw before it would matter.
@Test(timeout=60000) public void testDisableTableMultipleTimes() throws Exception { final TableName tableName=TableName.valueOf("testDisableTableMultipleTimes"); final ProcedureExecutor procExec=getMasterProcedureExecutor(); MasterProcedureTestingUtility.createTable(procExec,tableName,null,"f1","f2"); long procId1=procExec.submitProcedure(new DisableTableProcedure(procExec.getEnvironment(),tableName,false),nonceGroup,nonce); ProcedureTestingUtility.waitProcedure(procExec,procId1); ProcedureTestingUtility.assertProcNotFailed(procExec,procId1); MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),tableName); long procId2=procExec.submitProcedure(new DisableTableProcedure(procExec.getEnvironment(),tableName,false),nonceGroup + 1,nonce + 1); ProcedureTestingUtility.waitProcedure(procExec,procId2); ProcedureInfo result=procExec.getResult(procId2); assertTrue(result.isFailed()); LOG.debug("Disable failed with exception: " + result.getExceptionFullMessage()); assertTrue(ProcedureTestingUtility.getExceptionCause(result) instanceof TableNotEnabledException); try { final ProcedurePrepareLatch prepareLatch=new ProcedurePrepareLatch.CompatibilityLatch(); long procId3=procExec.submitProcedure(new DisableTableProcedure(procExec.getEnvironment(),tableName,false,prepareLatch),nonceGroup + 2,nonce + 2); prepareLatch.await(); Assert.fail("Disable should throw exception through latch."); } catch ( TableNotEnabledException tnee) { LOG.debug("Disable failed with expected exception."); } long procId4=procExec.submitProcedure(new DisableTableProcedure(procExec.getEnvironment(),tableName,true)); ProcedureTestingUtility.waitProcedure(procExec,procId4); ProcedureTestingUtility.assertProcNotFailed(procExec,procId4); MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),tableName); }

InternalCallVerifier BooleanVerifier 
/**
 * Two DisableTableProcedure submissions with the same nonce collapse into
 * one: both succeed and share a procedure id.
 */
@Test(timeout = 60000)
public void testDisableTableTwiceWithSameNonce() throws Exception {
  final TableName tableName = TableName.valueOf("testDisableTableTwiceWithSameNonce");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");

  final long firstId = procExec.submitProcedure(
      new DisableTableProcedure(procExec.getEnvironment(), tableName, false), nonceGroup, nonce);
  final long secondId = procExec.submitProcedure(
      new DisableTableProcedure(procExec.getEnvironment(), tableName, false), nonceGroup, nonce);

  ProcedureTestingUtility.waitProcedure(procExec, firstId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, firstId);
  MasterProcedureTestingUtility.validateTableIsDisabled(
      UTIL.getHBaseCluster().getMaster(), tableName);

  ProcedureTestingUtility.waitProcedure(procExec, secondId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, secondId);
  assertTrue(firstId == secondId);
}

Class: org.apache.hadoop.hbase.master.procedure.TestEnableTableProcedure

UtilityVerifier InternalCallVerifier BooleanVerifier ExceptionVerifier HybridVerifier 
/**
 * Enabling a table that is not disabled must fail: first via the procedure
 * result, then (after a forced enable succeeds) by throwing
 * TableNotDisabledException through the prepare latch, which satisfies the
 * {@code expected} clause of this test.
 *
 * <p>BUGFIX: the table name string was "testEnableNonExistingTable" — a
 * copy-paste slip; renamed to match this test's purpose and avoid any clash
 * with a similarly-named test's table.
 */
@Test(timeout = 60000, expected = TableNotDisabledException.class)
public void testEnableNonDisabledTable() throws Exception {
  final TableName tableName = TableName.valueOf("testEnableNonDisabledTable");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");

  // Enable of an already-enabled table fails when the boolean flag is false.
  // NOTE(review): the flag's exact semantics are defined by
  // EnableTableProcedure — confirm it is the skip-state-check flag.
  long procId1 = procExec.submitProcedure(
      new EnableTableProcedure(procExec.getEnvironment(), tableName, false), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureInfo result = procExec.getResult(procId1);
  assertTrue(result.isFailed());
  LOG.debug("Enable failed with exception: " + result.getExceptionFullMessage());
  assertTrue(
      ProcedureTestingUtility.getExceptionCause(result) instanceof TableNotDisabledException);

  // With the flag set to true the procedure completes without error.
  long procId2 = procExec.submitProcedure(
      new EnableTableProcedure(procExec.getEnvironment(), tableName, true),
      nonceGroup + 1, nonce + 1);
  ProcedureTestingUtility.waitProcedure(procExec, procId2);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);

  // The CompatibilityLatch surfaces the failure synchronously; the throw
  // from await() is what @Test(expected=...) consumes. If nothing is thrown,
  // the fail() below reports it.
  final ProcedurePrepareLatch prepareLatch = new ProcedurePrepareLatch.CompatibilityLatch();
  long procId3 = procExec.submitProcedure(
      new EnableTableProcedure(procExec.getEnvironment(), tableName, false, prepareLatch),
      nonceGroup + 2, nonce + 2);
  prepareLatch.await();
  Assert.fail("Enable should throw exception through latch.");
}

InternalCallVerifier BooleanVerifier 
/**
 * Two EnableTableProcedure submissions with the same nonce collapse into
 * one: both succeed and share a procedure id.
 */
@Test(timeout = 60000)
public void testEnableTableTwiceWithSameNonce() throws Exception {
  final TableName tableName = TableName.valueOf("testEnableTableTwiceWithSameNonce");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
  // Start from a disabled table so the enable is a real state change.
  UTIL.getHBaseAdmin().disableTable(tableName);

  final long firstId = procExec.submitProcedure(
      new EnableTableProcedure(procExec.getEnvironment(), tableName, false), nonceGroup, nonce);
  final long secondId = procExec.submitProcedure(
      new EnableTableProcedure(procExec.getEnvironment(), tableName, false), nonceGroup, nonce);

  ProcedureTestingUtility.waitProcedure(procExec, firstId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, firstId);
  ProcedureTestingUtility.waitProcedure(procExec, secondId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, secondId);
  assertTrue(firstId == secondId);
}

Class: org.apache.hadoop.hbase.master.procedure.TestMasterProcedureEvents

IterativeVerifier InternalCallVerifier EqualityVerifier 
// Flags the master as not-initialized, submits a CreateTableProcedure, and
// asserts over ~1s of polling that the scheduler's poll count has advanced by
// exactly one (the procedure was seen once but cannot proceed) while the
// null-poll count stays constant. After setInitialized(true) the procedure
// completes and the poll count must end at exactly pollCalls + 2.
// NOTE(review): the assertions inside the loop are timing-sensitive — they
// assume the single wake-up poll has already happened within the first 100ms.
@Test public void testMasterInitializedEvent() throws Exception { TableName tableName=TableName.valueOf("testMasterInitializedEvent"); HMaster master=UTIL.getMiniHBaseCluster().getMaster(); ProcedureExecutor procExec=master.getMasterProcedureExecutor(); MasterProcedureScheduler procSched=procExec.getEnvironment().getProcedureQueue(); HRegionInfo hri=new HRegionInfo(tableName); HTableDescriptor htd=new HTableDescriptor(tableName); HColumnDescriptor hcd=new HColumnDescriptor("f"); htd.addFamily(hcd); while (!master.isInitialized()) Thread.sleep(250); master.setInitialized(false); CreateTableProcedure proc=new CreateTableProcedure(procExec.getEnvironment(),htd,new HRegionInfo[]{hri}); long pollCalls=procSched.getPollCalls(); long nullPollCalls=procSched.getNullPollCalls(); long procId=procExec.submitProcedure(proc,HConstants.NO_NONCE,HConstants.NO_NONCE); for (int i=0; i < 10; ++i) { Thread.sleep(100); assertEquals(pollCalls + 1,procSched.getPollCalls()); assertEquals(nullPollCalls,procSched.getNullPollCalls()); } master.setInitialized(true); ProcedureTestingUtility.waitProcedure(procExec,procId); assertEquals(pollCalls + 2,procSched.getPollCalls()); assertEquals(nullPollCalls,procSched.getNullPollCalls()); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Loads a table, disables server-crash processing on the master, kills the
// region server hosting regions, and manually submits a ServerCrashProcedure.
// While processing is disabled the scheduler's poll count must stay at
// pollCalls + 1 (procedure seen once, cannot run) with no null polls; after
// setServerCrashProcessingEnabled(true) the procedure completes and the poll
// count must reach at least pollCalls + 2.
// NOTE(review): "moveFromOnelineToDeadServers" is the method's actual
// (misspelled) name in this ServerManager API — do not "correct" the call
// site without renaming the API.
@Test public void testServerCrashProcedureEvent() throws Exception { TableName tableName=TableName.valueOf("testServerCrashProcedureEventTb"); HMaster master=UTIL.getMiniHBaseCluster().getMaster(); ProcedureExecutor procExec=master.getMasterProcedureExecutor(); MasterProcedureScheduler procSched=procExec.getEnvironment().getProcedureQueue(); while (!master.isServerCrashProcessingEnabled() || !master.isInitialized() || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) { Thread.sleep(25); } UTIL.createTable(tableName,HBaseTestingUtility.COLUMNS[0]); try (Table t=UTIL.getConnection().getTable(tableName)){ UTIL.loadTable(t,HBaseTestingUtility.COLUMNS[0]); } master.setServerCrashProcessingEnabled(false); long pollCalls=procSched.getPollCalls(); long nullPollCalls=procSched.getNullPollCalls(); HRegionServer hrs=getServerWithRegions(); boolean carryingMeta=master.getAssignmentManager().isCarryingMeta(hrs.getServerName()); UTIL.getHBaseCluster().killRegionServer(hrs.getServerName()); hrs.join(); while (!master.getServerManager().isServerDead(hrs.getServerName())) Thread.sleep(10); master.getServerManager().moveFromOnelineToDeadServers(hrs.getServerName()); long procId=procExec.submitProcedure(new ServerCrashProcedure(hrs.getServerName(),true,carryingMeta)); for (int i=0; i < 10; ++i) { Thread.sleep(100); assertEquals(pollCalls + 1,procSched.getPollCalls()); assertEquals(nullPollCalls,procSched.getNullPollCalls()); } master.setServerCrashProcessingEnabled(true); ProcedureTestingUtility.waitProcedure(procExec,procId); LOG.debug("server crash processing poll calls: " + procSched.getPollCalls()); assertTrue(procSched.getPollCalls() >= (pollCalls + 2)); assertEquals(nullPollCalls,procSched.getNullPollCalls()); UTIL.deleteTable(tableName); }

Class: org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises the scheduler's table read/write lock semantics with procedures
// 1..5 (EDIT, READ, EDIT, READ, READ):
//  - proc 1 (EDIT) takes the exclusive lock; poll(0) returns null while held;
//  - proc 2 (READ) takes a shared lock, which blocks proc 3 (EDIT) from
//    acquiring the exclusive lock until the shared lock is released;
//  - after proc 3 releases, procs 4 and 5 (both READ) hold the shared lock
//    concurrently;
//  - once all locks are released and the queue is empty, markTableAsDeleted
//    must succeed.
// The exact acquire/release ordering IS the behavior under test — do not
// reorder statements here.
/** * Verify the correct logic of RWLocks on the queue */ @Test public void testVerifyRwLocks() throws Exception { TableName tableName=TableName.valueOf("testtb"); queue.addBack(new TestTableProcedure(1,tableName,TableProcedureInterface.TableOperationType.EDIT)); queue.addBack(new TestTableProcedure(2,tableName,TableProcedureInterface.TableOperationType.READ)); queue.addBack(new TestTableProcedure(3,tableName,TableProcedureInterface.TableOperationType.EDIT)); queue.addBack(new TestTableProcedure(4,tableName,TableProcedureInterface.TableOperationType.READ)); queue.addBack(new TestTableProcedure(5,tableName,TableProcedureInterface.TableOperationType.READ)); Procedure proc=queue.poll(); assertEquals(1,proc.getProcId()); assertEquals(true,queue.tryAcquireTableExclusiveLock(proc,tableName)); assertEquals(null,queue.poll(0)); queue.releaseTableExclusiveLock(proc,tableName); Procedure rdProc=queue.poll(); assertEquals(2,rdProc.getProcId()); assertEquals(true,queue.tryAcquireTableSharedLock(rdProc,tableName)); Procedure wrProc=queue.poll(); assertEquals(3,wrProc.getProcId()); assertEquals(false,queue.tryAcquireTableExclusiveLock(wrProc,tableName)); queue.releaseTableSharedLock(rdProc,tableName); assertEquals(true,queue.tryAcquireTableExclusiveLock(wrProc,tableName)); assertEquals(null,queue.poll(0)); queue.releaseTableExclusiveLock(wrProc,tableName); rdProc=queue.poll(); assertEquals(4,rdProc.getProcId()); assertEquals(true,queue.tryAcquireTableSharedLock(rdProc,tableName)); Procedure rdProc2=queue.poll(); assertEquals(5,rdProc2.getProcId()); assertEquals(true,queue.tryAcquireTableSharedLock(rdProc2,tableName)); queue.releaseTableSharedLock(rdProc,tableName); queue.releaseTableSharedLock(rdProc2,tableName); assertEquals(0,queue.size()); assertTrue("queue should be deleted",queue.markTableAsDeleted(tableName)); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Check that the table queue is not deletable until every procedure
 * in-progress is completed (this is a special case for read-locks).
 */
@Test
public void testCreateDeleteTableOperationsWithReadLock() throws Exception {
  final TableName tableName = TableName.valueOf("testtb");
  final int nitems = 2;
  for (int i = 1; i <= nitems; ++i) {
    queue.addBack(
        new TestTableProcedure(i, tableName, TableProcedureInterface.TableOperationType.READ));
  }
  // Pending procedures keep the queue alive.
  assertFalse(queue.markTableAsDeleted(tableName));

  // Take a shared lock for every queued procedure; still undeletable.
  final Procedure[] polled = new Procedure[nitems];
  for (int i = 0; i < nitems; ++i) {
    polled[i] = queue.poll();
    assertEquals(i + 1, polled[i].getProcId());
    assertTrue(queue.tryAcquireTableSharedLock(polled[i], tableName));
    assertFalse(queue.markTableAsDeleted(tableName));
  }

  // Release the shared locks one at a time; the queue remains undeletable
  // while at least one lock is still held.
  for (int i = 0; i < nitems; ++i) {
    assertFalse(queue.markTableAsDeleted(tableName));
    queue.releaseTableSharedLock(polled[i], tableName);
  }

  // Empty and unlocked: now it can go.
  assertEquals(0, queue.size());
  assertTrue(queue.markTableAsDeleted(tableName));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Check that the table queue is not deletable until every procedure
 * in-progress is completed (this is a special case for write-locks).
 */
@Test
public void testCreateDeleteTableOperationsWithWriteLock() throws Exception {
  final TableName tableName = TableName.valueOf("testtb");
  queue.addBack(
      new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.EDIT));

  // A queued procedure keeps the table queue alive.
  assertFalse(queue.markTableAsDeleted(tableName));

  final Procedure wrProc = queue.poll();
  assertEquals(1, wrProc.getProcId());
  assertTrue(queue.tryAcquireTableExclusiveLock(wrProc, tableName));

  // The queue is empty but the exclusive lock is held: still not deletable.
  assertEquals(0, queue.size());
  assertFalse(queue.markTableAsDeleted(tableName));

  queue.releaseTableExclusiveLock(wrProc, tableName);
  assertTrue(queue.markTableAsDeleted(tableName));
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Inserts NUM_TABLES * NUM_ITEMS edit procedures (proc id = i*1000 + j, where
// i is the table index and j the item index) and then drains the queue,
// asserting a round-robin order across tables: for each j, tables 1..NUM_TABLES
// come out in turn (verified via assertEquals(i*1000+j, proc.getProcId())),
// with the queue size decreasing by one per poll. Every table queue must be
// deletable once drained. The outer-j / inner-i loop order in the drain phase
// is deliberate — it encodes the expected fairness across tables.
/** * Verify simple create/insert/fetch/delete of the table queue. */ @Test public void testSimpleTableOpsQueues() throws Exception { final int NUM_TABLES=10; final int NUM_ITEMS=10; int count=0; for (int i=1; i <= NUM_TABLES; ++i) { TableName tableName=TableName.valueOf(String.format("test-%04d",i)); for (int j=1; j <= NUM_ITEMS; ++j) { queue.addBack(new TestTableProcedure(i * 1000 + j,tableName,TableProcedureInterface.TableOperationType.EDIT)); assertEquals(++count,queue.size()); } } assertEquals(NUM_TABLES * NUM_ITEMS,queue.size()); for (int j=1; j <= NUM_ITEMS; ++j) { for (int i=1; i <= NUM_TABLES; ++i) { Procedure proc=queue.poll(); assertTrue(proc != null); TableName tableName=((TestTableProcedure)proc).getTableName(); queue.tryAcquireTableExclusiveLock(proc,tableName); queue.releaseTableExclusiveLock(proc,tableName); queue.completionCleanup(proc); assertEquals(--count,queue.size()); assertEquals(i * 1000 + j,proc.getProcId()); } } assertEquals(0,queue.size()); for (int i=1; i <= NUM_TABLES; ++i) { TableName tableName=TableName.valueOf(String.format("test-%04d",i)); assertTrue(queue.markTableAsDeleted(tableName)); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Verifies the interaction between namespace-level and table-level exclusive
// locks, with procedures 1 (ns1 EDIT), 2 (ns1 table EDIT), 3 (ns2 table EDIT),
// 4 (ns2 EDIT):
//  - proc 1 takes ns1's namespace lock; proc 2 (a table in ns1) is not polled
//    until that lock is released;
//  - proc 4 takes and releases ns2's namespace lock, then yields;
//  - proc 3 takes the exclusive lock on ns2's table, which blocks proc 4 from
//    re-acquiring the ns2 namespace lock until the table lock is released.
// The poll order assertions (1, 4, 3, 4, 2, 4) encode this schedule — do not
// reorder statements.
@Test public void testVerifyNamespaceRwLocks() throws Exception { String nsName1="ns1"; String nsName2="ns2"; TableName tableName1=TableName.valueOf(nsName1,"testtb"); TableName tableName2=TableName.valueOf(nsName2,"testtb"); queue.addBack(new TestNamespaceProcedure(1,nsName1,TableProcedureInterface.TableOperationType.EDIT)); queue.addBack(new TestTableProcedure(2,tableName1,TableProcedureInterface.TableOperationType.EDIT)); queue.addBack(new TestTableProcedure(3,tableName2,TableProcedureInterface.TableOperationType.EDIT)); queue.addBack(new TestNamespaceProcedure(4,nsName2,TableProcedureInterface.TableOperationType.EDIT)); Procedure procNs1=queue.poll(); assertEquals(1,procNs1.getProcId()); assertEquals(true,queue.tryAcquireNamespaceExclusiveLock(procNs1,nsName1)); Procedure procNs2=queue.poll(); assertEquals(4,procNs2.getProcId()); assertEquals(true,queue.tryAcquireNamespaceExclusiveLock(procNs2,nsName2)); queue.releaseNamespaceExclusiveLock(procNs2,nsName2); queue.yield(procNs2); procNs2=queue.poll(); assertEquals(3,procNs2.getProcId()); assertEquals(true,queue.tryAcquireTableExclusiveLock(procNs2,tableName2)); Procedure procNs2b=queue.poll(); assertEquals(4,procNs2b.getProcId()); assertEquals(false,queue.tryAcquireNamespaceExclusiveLock(procNs2b,nsName2)); queue.yield(procNs2b); queue.releaseNamespaceExclusiveLock(procNs1,nsName1); long procId=queue.poll().getProcId(); assertEquals(2,procId); queue.releaseTableExclusiveLock(procNs2,tableName2); procId=queue.poll().getProcId(); assertEquals(4,procId); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verify that "write" operations for a single table are serialized,
 * but different tables can be executed in parallel.
 *
 * <p>Worker threads repeatedly acquire procedures from the shared set; a
 * HashSet of in-flight table names asserts no two workers ever run the same
 * table concurrently, while a counter bounds overall parallelism by the
 * number of tables.
 */
@Test(timeout = 90000)
public void testConcurrentWriteOps() throws Exception {
  final TestTableProcSet procSet = new TestTableProcSet(queue);

  final int NUM_ITEMS = 10;
  final int NUM_TABLES = 4;
  final AtomicInteger opsCount = new AtomicInteger(0);
  // Tables are named testtb-0000 .. testtb-000(NUM_TABLES-1).
  for (int i = 0; i < NUM_TABLES; ++i) {
    TableName tableName = TableName.valueOf(String.format("testtb-%04d", i));
    for (int j = 1; j < NUM_ITEMS; ++j) {
      procSet.addBack(new TestTableProcedure(i * 100 + j, tableName,
          TableProcedureInterface.TableOperationType.EDIT));
      opsCount.incrementAndGet();
    }
  }
  assertEquals(opsCount.get(), queue.size());

  final Thread[] threads = new Thread[NUM_TABLES * 2];
  final HashSet concurrentTables = new HashSet();
  final ArrayList failures = new ArrayList();
  final AtomicInteger concurrentCount = new AtomicInteger(0);
  for (int i = 0; i < threads.length; ++i) {
    threads[i] = new Thread() {
      @Override
      public void run() {
        while (opsCount.get() > 0) {
          try {
            Procedure proc = procSet.acquire();
            if (proc == null) {
              queue.signalAll();
              if (opsCount.get() > 0) {
                continue;
              }
              break;
            }
            TableName tableId = procSet.getTableName(proc);
            // No other worker may be processing this table right now.
            synchronized (concurrentTables) {
              assertTrue("unexpected concurrency on " + tableId, concurrentTables.add(tableId));
            }
            assertTrue(opsCount.decrementAndGet() >= 0);
            try {
              long procId = proc.getProcId();
              int concurrent = concurrentCount.incrementAndGet();
              assertTrue("inc-concurrent=" + concurrent + " 1 <= concurrent <= " + NUM_TABLES,
                  concurrent >= 1 && concurrent <= NUM_TABLES);
              LOG.debug("[S] tableId=" + tableId + " procId=" + procId + " concurrent=" + concurrent);
              Thread.sleep(2000);
              concurrent = concurrentCount.decrementAndGet();
              LOG.debug("[E] tableId=" + tableId + " procId=" + procId + " concurrent=" + concurrent);
              assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES);
            } finally {
              synchronized (concurrentTables) {
                assertTrue(concurrentTables.remove(tableId));
              }
              procSet.release(proc);
            }
          } catch (Throwable e) {
            LOG.error("Failed " + e.getMessage(), e);
            synchronized (failures) {
              failures.add(e.getMessage());
            }
          } finally {
            queue.signalAll();
          }
        }
      }
    };
    threads[i].start();
  }
  for (int i = 0; i < threads.length; ++i) {
    threads[i].join();
  }
  assertTrue(failures.toString(), failures.isEmpty());
  assertEquals(0, opsCount.get());
  assertEquals(0, queue.size());
  // BUGFIX: iterate 0..NUM_TABLES-1 to match the names used at creation time.
  // The old 1..NUM_TABLES loop checked the never-created "testtb-0004" and
  // skipped "testtb-0000", leaking that table's queue.
  for (int i = 0; i < NUM_TABLES; ++i) {
    TableName table = TableName.valueOf(String.format("testtb-%04d", i));
    assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table));
  }
}

Class: org.apache.hadoop.hbase.master.procedure.TestModifyColumnFamilyProcedure

InternalCallVerifier BooleanVerifier 
/**
 * Modifying a column family that does not exist on the table must fail with
 * InvalidFamilyOperationException.
 */
@Test(timeout = 60000)
public void testModifyNonExistingColumnFamily() throws Exception {
  // NOTE(review): table name string differs from the method name
  // (copy-paste?) — preserved for behavior compatibility.
  final TableName tableName = TableName.valueOf("testModifyExistingColumnFamily");
  final String cf2 = "cf2";
  final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf2);
  int oldBlockSize = columnDescriptor.getBlocksize();
  int newBlockSize = 2 * oldBlockSize;
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  // The table only has family "f1" — cf2 is absent.
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1");
  columnDescriptor.setBlocksize(newBlockSize);

  final long procId = procExec.submitProcedure(
      new ModifyColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor),
      nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId);

  final ProcedureInfo procInfo = procExec.getResult(procId);
  assertTrue(procInfo.isFailed());
  LOG.debug("Modify failed with exception: " + procInfo.getExceptionFullMessage());
  assertTrue(
      ProcedureTestingUtility.getExceptionCause(procInfo) instanceof InvalidFamilyOperationException);
}

Class: org.apache.hadoop.hbase.master.procedure.TestModifyNamespaceProcedure

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Modifies an existing namespace — changes one quota value and adds a second
 * — then verifies the cluster-side descriptor reflects both changes.
 */
@Test(timeout = 60000)
public void testModifyNamespace() throws Exception {
  final NamespaceDescriptor nsd = NamespaceDescriptor.create("testModifyNamespace").build();
  final String nsKey1 = "hbase.namespace.quota.maxregions";
  final String nsValue1before = "1111";
  final String nsValue1after = "9999";
  final String nsKey2 = "hbase.namespace.quota.maxtables";
  final String nsValue2 = "10";
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  nsd.setConfiguration(nsKey1, nsValue1before);
  createNamespaceForTesting(nsd);

  // Sanity-check the initial, persisted state.
  NamespaceDescriptor currentNsDescriptor =
      UTIL.getHBaseAdmin().getNamespaceDescriptor(nsd.getName());
  assertEquals(currentNsDescriptor.getConfigurationValue(nsKey1), nsValue1before);
  assertNull(currentNsDescriptor.getConfigurationValue(nsKey2));

  // Change nsKey1 and add nsKey2, then run the modify procedure.
  nsd.setConfiguration(nsKey1, nsValue1after);
  nsd.setConfiguration(nsKey2, nsValue2);
  long procId1 = procExec.submitProcedure(
      new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);

  // Re-fetch and verify the persisted values.
  currentNsDescriptor = UTIL.getHBaseAdmin().getNamespaceDescriptor(nsd.getName());
  // BUGFIX: this previously read nsd (the locally mutated descriptor), making
  // the assertion a tautology; check the descriptor fetched from the cluster.
  assertEquals(currentNsDescriptor.getConfigurationValue(nsKey1), nsValue1after);
  assertEquals(currentNsDescriptor.getConfigurationValue(nsKey2), nsValue2);
}

InternalCallVerifier BooleanVerifier 
/**
 * A namespace modification with a negative max-tables quota must fail with
 * a ConstraintException.
 */
@Test(timeout = 60000)
public void testModifyNamespaceWithInvalidTableCount() throws Exception {
  final NamespaceDescriptor descriptor =
      NamespaceDescriptor.create("testModifyNamespaceWithInvalidTableCount").build();
  final String quotaKey = "hbase.namespace.quota.maxtables";
  final String quotaValue = "-1";
  final ProcedureExecutor executor = getMasterProcedureExecutor();

  createNamespaceForTesting(descriptor);

  // Apply the invalid (negative) table quota and submit the modify procedure.
  descriptor.setConfiguration(quotaKey, quotaValue);
  long pid = executor.submitProcedure(
      new ModifyNamespaceProcedure(executor.getEnvironment(), descriptor), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(executor, pid);

  ProcedureInfo info = executor.getResult(pid);
  assertTrue(info.isFailed());
  LOG.debug("Modify namespace failed with exception: " + info.getExceptionFullMessage());
  assertTrue(ProcedureTestingUtility.getExceptionCause(info) instanceof ConstraintException);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Modifying a namespace that does not exist must fail with a
 * NamespaceNotFoundException.
 */
@Test(timeout = 60000)
public void testModifyNonExistNamespace() throws Exception {
  final String namespaceName = "testModifyNonExistNamespace";
  final ProcedureExecutor executor = getMasterProcedureExecutor();

  // Confirm the namespace is absent; the admin lookup either returns null
  // or throws NamespaceNotFoundException — both count as "does not exist".
  try {
    NamespaceDescriptor lookedUp = UTIL.getHBaseAdmin().getNamespaceDescriptor(namespaceName);
    assertNull(lookedUp);
  } catch (NamespaceNotFoundException nsnfe) {
    LOG.debug("The namespace " + namespaceName + " does not exist. This is expected.");
  }

  // Submitting a modify for the absent namespace must fail.
  final NamespaceDescriptor descriptor = NamespaceDescriptor.create(namespaceName).build();
  long pid = executor.submitProcedure(
      new ModifyNamespaceProcedure(executor.getEnvironment(), descriptor), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(executor, pid);

  ProcedureInfo info = executor.getResult(pid);
  assertTrue(info.isFailed());
  LOG.debug("modify namespace failed with exception: " + info.getExceptionFullMessage());
  assertTrue(ProcedureTestingUtility.getExceptionCause(info) instanceof NamespaceNotFoundException);
}

InternalCallVerifier EqualityVerifier 
/**
 * Runs a namespace modification with kill-before-store-update enabled so
 * every step is executed twice across recovery, then verifies the new
 * configuration value is visible through the admin API.
 */
@Test(timeout = 60000)
public void testRecoveryAndDoubleExecution() throws Exception {
  final NamespaceDescriptor descriptor =
      NamespaceDescriptor.create("testRecoveryAndDoubleExecution").build();
  final String confKey = "foo";
  final String confValue = "bar";
  final ProcedureExecutor executor = getMasterProcedureExecutor();

  createNamespaceForTesting(descriptor);
  ProcedureTestingUtility.waitNoProcedureRunning(executor);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(executor, true);

  descriptor.setConfiguration(confKey, confValue);
  long pid = executor.submitProcedure(
      new ModifyNamespaceProcedure(executor.getEnvironment(), descriptor), nonceGroup, nonce);

  // Drive the procedure through every state with restarts in between.
  int steps = ModifyNamespaceState.values().length;
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
      executor, pid, steps, ModifyNamespaceState.values());
  ProcedureTestingUtility.assertProcNotFailed(executor, pid);

  NamespaceDescriptor fetched =
      UTIL.getHBaseAdmin().getNamespaceDescriptor(descriptor.getName());
  assertEquals(fetched.getConfigurationValue(confKey), confValue);
}

InternalCallVerifier BooleanVerifier 
/**
 * A namespace modification with a negative max-regions quota must fail with
 * a ConstraintException.
 */
@Test(timeout = 60000)
public void testModifyNamespaceWithInvalidRegionCount() throws Exception {
  final NamespaceDescriptor descriptor =
      NamespaceDescriptor.create("testModifyNamespaceWithInvalidRegionCount").build();
  final String quotaKey = "hbase.namespace.quota.maxregions";
  final String quotaValue = "-1";
  final ProcedureExecutor executor = getMasterProcedureExecutor();

  createNamespaceForTesting(descriptor);

  // Apply the invalid (negative) region quota and submit the modify procedure.
  descriptor.setConfiguration(quotaKey, quotaValue);
  long pid = executor.submitProcedure(
      new ModifyNamespaceProcedure(executor.getEnvironment(), descriptor), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(executor, pid);

  ProcedureInfo info = executor.getResult(pid);
  assertTrue(info.isFailed());
  LOG.debug("Modify namespace failed with exception: " + info.getExceptionFullMessage());
  assertTrue(ProcedureTestingUtility.getExceptionCause(info) instanceof ConstraintException);
}

InternalCallVerifier NullVerifier 
/**
 * Rolls a namespace modification back partway through (two steps short of
 * completion) and verifies the configuration change was undone: the key
 * must not be present on the server-side descriptor afterwards.
 */
@Test(timeout = 60000)
public void testRollbackAndDoubleExecution() throws Exception {
  final NamespaceDescriptor descriptor =
      NamespaceDescriptor.create("testRollbackAndDoubleExecution").build();
  final String confKey = "foo";
  final String confValue = "bar";
  final ProcedureExecutor executor = getMasterProcedureExecutor();

  createNamespaceForTesting(descriptor);
  ProcedureTestingUtility.waitNoProcedureRunning(executor);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(executor, true);

  descriptor.setConfiguration(confKey, confValue);
  long pid = executor.submitProcedure(
      new ModifyNamespaceProcedure(executor.getEnvironment(), descriptor), nonceGroup, nonce);

  // Abort two steps before the end so the rollback path is exercised.
  int steps = ModifyNamespaceState.values().length - 2;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
      executor, pid, steps, ModifyNamespaceState.values());

  NamespaceDescriptor fetched =
      UTIL.getHBaseAdmin().getNamespaceDescriptor(descriptor.getName());
  assertNull(fetched.getConfigurationValue(confKey));
}

Class: org.apache.hadoop.hbase.master.procedure.TestModifyTableProcedure

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises ModifyTableProcedure adding a column family in both modes:
// "cf2" is added while the table is enabled (online), then the table is
// disabled and "cf3" is added (offline). After each modify the descriptor
// is re-read through the admin API and the family count plus presence of
// the new family are asserted (1 -> 2 -> 3 families).
@Test(timeout=60000) public void testModifyTableAddCF() throws Exception { final TableName tableName=TableName.valueOf("testModifyTableAddCF"); final ProcedureExecutor procExec=getMasterProcedureExecutor(); MasterProcedureTestingUtility.createTable(procExec,tableName,null,"cf1"); HTableDescriptor currentHtd=UTIL.getHBaseAdmin().getTableDescriptor(tableName); assertEquals(1,currentHtd.getFamiliesKeys().size()); String cf2="cf2"; HTableDescriptor htd=new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); htd.addFamily(new HColumnDescriptor(cf2)); long procId=ProcedureTestingUtility.submitAndWait(procExec,new ModifyTableProcedure(procExec.getEnvironment(),htd)); ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId)); currentHtd=UTIL.getHBaseAdmin().getTableDescriptor(tableName); assertEquals(2,currentHtd.getFamiliesKeys().size()); assertTrue(currentHtd.hasFamily(cf2.getBytes())); UTIL.getHBaseAdmin().disableTable(tableName); ProcedureTestingUtility.waitNoProcedureRunning(procExec); String cf3="cf3"; HTableDescriptor htd2=new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); htd2.addFamily(new HColumnDescriptor(cf3)); long procId2=ProcedureTestingUtility.submitAndWait(procExec,new ModifyTableProcedure(procExec.getEnvironment(),htd2)); ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2)); currentHtd=UTIL.getHBaseAdmin().getTableDescriptor(tableName); assertTrue(currentHtd.hasFamily(cf3.getBytes())); assertEquals(3,currentHtd.getFamiliesKeys().size()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Modifies table settings through ModifyTableProcedure on a disabled table:
 * first max-file-size and region replication, then the read-only flag and
 * the memstore flush size, re-reading the descriptor from the admin API
 * after each modify to verify the values stuck.
 */
@Test(timeout = 60000)
public void testModifyTable() throws Exception {
  final TableName tableName = TableName.valueOf("testModifyTable");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf");
  UTIL.getHBaseAdmin().disableTable(tableName);

  // First modification: double the max file size and bump region replication.
  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  long newMaxFileSize = htd.getMaxFileSize() * 2;
  htd.setMaxFileSize(newMaxFileSize);
  htd.setRegionReplication(3);
  long procId1 = ProcedureTestingUtility.submitAndWait(
      procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));
  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newMaxFileSize, currentHtd.getMaxFileSize());

  // Second modification: flip the read-only flag and double the memstore
  // flush size. (Idiom fix: "!x" instead of "x ? false : true".)
  boolean newReadOnlyOption = !htd.isReadOnly();
  long newMemStoreFlushSize = htd.getMemStoreFlushSize() * 2;
  htd.setReadOnly(newReadOnlyOption);
  htd.setMemStoreFlushSize(newMemStoreFlushSize);
  long procId2 = ProcedureTestingUtility.submitAndWait(
      procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
  currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newReadOnlyOption, currentHtd.isReadOnly());
  assertEquals(newMemStoreFlushSize, currentHtd.getMemStoreFlushSize());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs ModifyTableProcedure on an enabled table with kill-before-store-update
 * so every state executes twice across recovery: flips compaction-enabled,
 * adds "cf2" and removes "cf3", then verifies the resulting descriptor and
 * on-disk table layout.
 */
@Test(timeout = 60000)
public void testRecoveryAndDoubleExecutionOnline() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
  final String cf2 = "cf2";
  final String cf3 = "cf3";
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  HRegionInfo[] regions =
      MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1", cf3);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  // Idiom fix: "!x" instead of "x ? false : true".
  boolean newCompactionEnableOption = !htd.isCompactionEnabled();
  htd.setCompactionEnabled(newCompactionEnableOption);
  htd.addFamily(new HColumnDescriptor(cf2));
  htd.removeFamily(cf3.getBytes());

  long procId = procExec.submitProcedure(
      new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);
  int numberOfSteps = ModifyTableState.values().length;
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
      procExec, procId, numberOfSteps, ModifyTableState.values());

  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled());
  assertEquals(2, currentHtd.getFamiliesKeys().size());
  assertTrue(currentHtd.hasFamily(cf2.getBytes()));
  assertFalse(currentHtd.hasFamily(cf3.getBytes()));
  MasterProcedureTestingUtility.validateTableCreation(
      UTIL.getHBaseCluster().getMaster(), tableName, regions, "cf1", cf2);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises ModifyTableProcedure removing a column family in both modes:
 * "cf2" is removed while the table is enabled (online), then the table is
 * disabled and "cf3" is removed (offline); the family count shrinks 3 -> 2 -> 1.
 */
@Test(timeout = 60000)
public void testModifyTableDeleteCF() throws Exception {
  // Fix: the table name literal previously read "testModifyTableAddCF",
  // colliding with the table created by testModifyTableAddCF in this class.
  final TableName tableName = TableName.valueOf("testModifyTableDeleteCF");
  final String cf2 = "cf2";
  final String cf3 = "cf3";
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1", cf2, cf3);
  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(3, currentHtd.getFamiliesKeys().size());

  // Online removal of cf2.
  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd.removeFamily(cf2.getBytes());
  long procId = ProcedureTestingUtility.submitAndWait(
      procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
  currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(2, currentHtd.getFamiliesKeys().size());
  assertFalse(currentHtd.hasFamily(cf2.getBytes()));

  // Offline removal of cf3 (table disabled first).
  UTIL.getHBaseAdmin().disableTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  HTableDescriptor htd2 = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd2.removeFamily(cf3.getBytes());
  long procId2 = ProcedureTestingUtility.submitAndWait(
      procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd2));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
  currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(1, currentHtd.getFamiliesKeys().size());
  assertFalse(currentHtd.hasFamily(cf3.getBytes()));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Runs ModifyTableProcedure on a disabled table with kill-before-store-update
 * so every state executes twice across recovery: flips compaction-enabled,
 * adds "cf2", removes "cf3", sets region replication to 3, then verifies the
 * resulting descriptor and on-disk table layout.
 */
@Test(timeout = 60000)
public void testRecoveryAndDoubleExecutionOffline() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline");
  final String cf2 = "cf2";
  final String cf3 = "cf3";
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  HRegionInfo[] regions =
      MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1", cf3);
  UTIL.getHBaseAdmin().disableTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  // Idiom fix: "!x" instead of "x ? false : true".
  boolean newCompactionEnableOption = !htd.isCompactionEnabled();
  htd.setCompactionEnabled(newCompactionEnableOption);
  htd.addFamily(new HColumnDescriptor(cf2));
  htd.removeFamily(cf3.getBytes());
  htd.setRegionReplication(3);

  long procId = procExec.submitProcedure(
      new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);
  int numberOfSteps = ModifyTableState.values().length;
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
      procExec, procId, numberOfSteps, ModifyTableState.values());

  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled());
  assertEquals(2, currentHtd.getFamiliesKeys().size());
  MasterProcedureTestingUtility.validateTableCreation(
      UTIL.getHBaseCluster().getMaster(), tableName, regions, false, "cf1", cf2);
}

Class: org.apache.hadoop.hbase.master.procedure.TestProcedureAdmin

InternalCallVerifier BooleanVerifier 
/**
 * A non-interruptible abort request (abort(procId, false)) against a running
 * DisableTableProcedure must be rejected; after restart the procedure
 * completes and the table ends up disabled.
 */
@Test(timeout = 60000)
public void testAbortProcedureInterruptedNotAllowed() throws Exception {
  final TableName tableName = TableName.valueOf("testAbortProcedureInterruptedNotAllowed");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  // Fix: dropped the unused "HRegionInfo[] regions" local; only the
  // table-creation side effect is needed here.
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f");
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  long procId = procExec.submitProcedure(
      new DisableTableProcedure(procExec.getEnvironment(), tableName, true), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId);

  // Abort with mayInterruptIfRunning=false must be refused.
  boolean abortResult = procExec.abort(procId, false);
  assertFalse(abortResult);

  // Let the procedure finish normally after a restart.
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
  ProcedureTestingUtility.restart(procExec);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
  MasterProcedureTestingUtility.validateTableIsDisabled(
      UTIL.getHBaseCluster().getMaster(), tableName);
}

InternalCallVerifier BooleanVerifier 
/**
 * An abort request against a running DeleteTableProcedure must be rejected;
 * after a restart-with-abort cycle the delete still completes and the table
 * is gone.
 */
@Test(timeout = 60000)
public void testAbortProcedureFailure() throws Exception {
  final TableName tableName = TableName.valueOf("testAbortProcedureFailure");
  final ProcedureExecutor executor = getMasterProcedureExecutor();

  HRegionInfo[] tableRegions =
      MasterProcedureTestingUtility.createTable(executor, tableName, null, "f");
  UTIL.getHBaseAdmin().disableTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(executor);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(executor, true);

  long pid = executor.submitProcedure(
      new DeleteTableProcedure(executor.getEnvironment(), tableName), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(executor, pid);

  // The abort request is expected to be rejected for the delete procedure.
  boolean aborted = executor.abort(pid, true);
  assertFalse(aborted);

  MasterProcedureTestingUtility.testRestartWithAbort(executor, pid);
  ProcedureTestingUtility.waitNoProcedureRunning(executor);
  ProcedureTestingUtility.assertProcNotFailed(executor, pid);
  MasterProcedureTestingUtility.validateTableDeletion(
      UTIL.getHBaseCluster().getMaster(), tableName, tableRegions, "f");
}

InternalCallVerifier BooleanVerifier 
/**
 * An abort request against a running DisableTableProcedure must succeed;
 * after a restart-with-abort cycle the table remains enabled.
 */
@Test(timeout = 60000)
public void testAbortProcedureSuccess() throws Exception {
  final TableName tableName = TableName.valueOf("testAbortProcedureSuccess");
  final ProcedureExecutor executor = getMasterProcedureExecutor();

  MasterProcedureTestingUtility.createTable(executor, tableName, null, "f");
  ProcedureTestingUtility.waitNoProcedureRunning(executor);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(executor, true);

  long pid = executor.submitProcedure(
      new DisableTableProcedure(executor.getEnvironment(), tableName, false), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(executor, pid);

  // This abort is expected to be accepted.
  boolean aborted = executor.abort(pid, true);
  assertTrue(aborted);

  MasterProcedureTestingUtility.testRestartWithAbort(executor, pid);
  ProcedureTestingUtility.waitNoProcedureRunning(executor);
  MasterProcedureTestingUtility.validateTableIsEnabled(
      UTIL.getHBaseCluster().getMaster(), tableName);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Aborting a procedure id the executor has never seen must return false.
 */
@Test(timeout = 60000)
public void testAbortNonExistProcedure() throws Exception {
  final ProcedureExecutor executor = getMasterProcedureExecutor();
  final Random rng = new Random();

  // Draw random ids until one has no recorded result, i.e. an id that does
  // not correspond to any known procedure.
  long unknownProcId = rng.nextLong();
  while (executor.getResult(unknownProcId) != null) {
    unknownProcId = rng.nextLong();
  }

  boolean aborted = executor.abort(unknownProcId, true);
  assertFalse(aborted);
}

BranchVerifier InternalCallVerifier BooleanVerifier 
/**
 * Lists procedures and checks their states: while a DisableTableProcedure is
 * held up by the kill-before-store-update toggle it must be listed RUNNABLE
 * and everything else FINISHED; after a restart completes the disable, all
 * listed procedures must be FINISHED.
 */
@Test(timeout = 60000)
public void testListProcedure() throws Exception {
  final TableName tableName = TableName.valueOf("testListProcedure");
  final ProcedureExecutor procExec = getMasterProcedureExecutor();

  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f");
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  long procId = procExec.submitProcedure(
      new DisableTableProcedure(procExec.getEnvironment(), tableName, false), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId);

  // Fix: use the parameterized List<ProcedureInfo> instead of the raw type.
  List<ProcedureInfo> listProcedures = procExec.listProcedures();
  assertTrue(listProcedures.size() >= 1);
  boolean found = false;
  for (ProcedureInfo procInfo : listProcedures) {
    if (procInfo.getProcId() == procId) {
      // The stalled disable must still be runnable.
      assertTrue(procInfo.getProcState() == ProcedureState.RUNNABLE);
      found = true;
    } else {
      assertTrue(procInfo.getProcState() == ProcedureState.FINISHED);
    }
  }
  assertTrue(found);

  // Clear the kill toggle, restart, and let the disable complete.
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
  ProcedureTestingUtility.restart(procExec);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId);

  listProcedures = procExec.listProcedures();
  for (ProcedureInfo procInfo : listProcedures) {
    assertTrue(procInfo.getProcState() == ProcedureState.FINISHED);
  }
}

Class: org.apache.hadoop.hbase.master.procedure.TestTruncateTableProcedure

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Truncating a table that is still enabled must fail with a
 * TableNotDisabledException.
 */
@Test(timeout = 60000)
public void testTruncateNotDisabledTable() throws Exception {
  final TableName tableName = TableName.valueOf("testTruncateNotDisabledTable");
  final ProcedureExecutor executor = getMasterProcedureExecutor();

  MasterProcedureTestingUtility.createTable(executor, tableName, null, "f");

  // Submit the truncate without disabling the table first.
  long pid = ProcedureTestingUtility.submitAndWait(
      executor, new TruncateTableProcedure(executor.getEnvironment(), tableName, false));

  ProcedureInfo info = executor.getResult(pid);
  assertTrue(info.isFailed());
  LOG.debug("Truncate failed with exception: " + info.getExceptionFullMessage());
  assertTrue(ProcedureTestingUtility.getExceptionCause(info) instanceof TableNotDisabledException);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Truncating a table that does not exist must fail with a
 * TableNotFoundException.
 */
@Test(timeout = 60000)
public void testTruncateNotExistentTable() throws Exception {
  final TableName tableName = TableName.valueOf("testTruncateNotExistentTable");
  final ProcedureExecutor executor = getMasterProcedureExecutor();

  // No table is created — the truncate targets a missing table.
  long pid = ProcedureTestingUtility.submitAndWait(
      executor, new TruncateTableProcedure(executor.getEnvironment(), tableName, true));

  ProcedureInfo info = executor.getResult(pid);
  assertTrue(info.isFailed());
  LOG.debug("Truncate failed with exception: " + info.getExceptionFullMessage());
  assertTrue(ProcedureTestingUtility.getExceptionCause(info) instanceof TableNotFoundException);
}

Class: org.apache.hadoop.hbase.master.procedure.TestWALProcedureStoreOnHDFS

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Kills WAL replication while multiple writers are queued: a postSync
// listener sleeps 2s per sync so (numSlots * 2 + 1) inserter threads pile
// up, then one of the three DataNodes is stopped. After joining all
// threads the store must have aborted (isRunning() false) and the number
// of RuntimeExceptions observed by the writers must be >= the store's
// thread count but < the total writer count.
// NOTE(review): timing-sensitive — relies on the 1s sleep before stopping
// the DataNode and the 2s postSync delay to keep writers queued; not safe
// to restructure without re-validating on a real mini-cluster.
@Test(timeout=60000) public void testWalAbortOnLowReplicationWithQueuedWriters() throws Exception { initConfig(UTIL.getConfiguration()); setup(); try { assertEquals(3,UTIL.getDFSCluster().getDataNodes().size()); store.registerListener(new ProcedureStore.ProcedureStoreListener(){ @Override public void postSync(){ Threads.sleepWithoutInterrupt(2000); } @Override public void abortProcess(){ } } ); final AtomicInteger reCount=new AtomicInteger(0); Thread[] thread=new Thread[store.getNumThreads() * 2 + 1]; for (int i=0; i < thread.length; ++i) { final long procId=i + 1; thread[i]=new Thread(){ public void run(){ try { LOG.debug("[S] INSERT " + procId); store.insert(new TestProcedure(procId,-1),null); LOG.debug("[E] INSERT " + procId); } catch ( RuntimeException e) { reCount.incrementAndGet(); LOG.debug("[F] INSERT " + procId + ": "+ e.getMessage()); } } } ; thread[i].start(); } Thread.sleep(1000); LOG.info("Stop DataNode"); UTIL.getDFSCluster().stopDataNode(0); assertEquals(2,UTIL.getDFSCluster().getDataNodes().size()); for (int i=0; i < thread.length; ++i) { thread[i].join(); } assertFalse(store.isRunning()); assertTrue(reCount.toString(),reCount.get() >= store.getNumThreads() && reCount.get() < thread.length); } finally { tearDown(); } }

IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/**
 * With one of three DataNodes stopped, repeated store.insert() calls must
 * eventually throw a RuntimeException (the test passes via
 * {@code expected=RuntimeException.class}); the store stops running as a
 * consequence. The fail() call is a safety net in case the loop exits
 * without throwing.
 */
@Test(timeout = 60000, expected = RuntimeException.class)
public void testWalAbortOnLowReplication() throws Exception {
  initConfig(UTIL.getConfiguration());
  setup();
  try {
    assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());

    LOG.info("Stop DataNode");
    UTIL.getDFSCluster().stopDataNode(0);
    assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());

    // Keep inserting until the store aborts; some insert is expected to
    // throw a RuntimeException before the loop ends.
    store.insert(new TestProcedure(1, -1), null);
    for (long i = 2; store.isRunning(); ++i) {
      assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
      store.insert(new TestProcedure(i, -1), null);
      Thread.sleep(100);
    }
    assertFalse(store.isRunning());
    // Fix: corrected the typo "exeption" in the failure message.
    fail("The store.insert() should throw an exception");
  } finally {
    tearDown();
  }
}

InternalCallVerifier BooleanVerifier 
// Inserts ~100 procedures while DataNodes are restarted round-robin (every
// 30th insert restarts the next of the three nodes), waiting for three
// replicas between inserts; the store must still be running at the end.
// NOTE(review): dfs.namenode.replication.min is lowered to 1 — presumably
// so the WAL can roll instead of aborting while a node is down; confirm
// against WALProcedureStore's low-replication handling.
@Test(timeout=60000) public void testWalRollOnLowReplication() throws Exception { initConfig(UTIL.getConfiguration()); UTIL.getConfiguration().setInt("dfs.namenode.replication.min",1); setup(); try { int dnCount=0; store.insert(new TestProcedure(1,-1),null); UTIL.getDFSCluster().restartDataNode(dnCount); for (long i=2; i < 100; ++i) { store.insert(new TestProcedure(i,-1),null); waitForNumReplicas(3); Thread.sleep(100); if ((i % 30) == 0) { LOG.info("Restart Data Node"); UTIL.getDFSCluster().restartDataNode(++dnCount % 3); } } assertTrue(store.isRunning()); } finally { tearDown(); } }

Class: org.apache.hadoop.hbase.master.snapshot.TestSnapshotFileCache

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies getUnreferencedFiles() does not rescan snapshots-in-progress
// when every queried file is referenced (the AtomicInteger counting
// getSnapshotsInProgress() calls stays unchanged and no file is deletable),
// and rescans exactly once when an unknown random file is added (count +1,
// and that random file is the only deletable one). The anonymous
// SnapshotFileCache subclass exists solely to count the rescans; the huge
// refresh period disables background refreshes.
@Test public void testWeNeverCacheTmpDirAndLoadIt() throws Exception { final AtomicInteger count=new AtomicInteger(0); long period=Long.MAX_VALUE; SnapshotFileCache cache=new SnapshotFileCache(fs,rootDir,period,10000000,"test-snapshot-file-cache-refresh",new SnapshotFiles()){ @Override List getSnapshotsInProgress() throws IOException { List result=super.getSnapshotsInProgress(); count.incrementAndGet(); return result; } @Override public void triggerCacheRefreshForTesting(){ super.triggerCacheRefreshForTesting(); } } ; SnapshotMock.SnapshotBuilder complete=createAndTestSnapshotV1(cache,"snapshot",false,false); SnapshotMock.SnapshotBuilder inProgress=createAndTestSnapshotV1(cache,"snapshotInProgress",true,false); int countBeforeCheck=count.get(); FSUtils.logFileSystemState(fs,rootDir,LOG); List allStoreFiles=getStoreFilesForSnapshot(complete); Iterable deletableFiles=cache.getUnreferencedFiles(allStoreFiles); assertTrue(Iterables.isEmpty(deletableFiles)); assertEquals(0,count.get() - countBeforeCheck); FileStatus randomFile=mockStoreFile(UUID.randomUUID().toString()); allStoreFiles.add(randomFile); deletableFiles=cache.getUnreferencedFiles(allStoreFiles); assertEquals(randomFile,Iterables.getOnlyElement(deletableFiles)); assertEquals(1,count.get() - countBeforeCheck); }

Class: org.apache.hadoop.hbase.master.snapshot.TestSnapshotHFileCleaner

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * An archived hfile that is still referenced by a completed snapshot must
 * not be deletable by SnapshotHFileCleaner: the test lays out a snapshot
 * directory containing a reference file for the hfile, creates a copy of
 * the hfile in the archive dir, and asserts isFileDeletable() is false.
 */
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  Path rootDir = FSUtils.getRootDir(conf);
  Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  FileSystem fs = FileSystem.get(conf);

  SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
  cleaner.setConf(conf);

  // Build the completed-snapshot layout:
  //   <snapshotDir>/<encoded region>/family/<hfile>   (reference file)
  // Fix: dropped the unused "byte[] snapshot" local.
  String snapshotName = "snapshot";
  TableName tableName = TableName.valueOf("table");
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  HRegionInfo mockRegion = new HRegionInfo(tableName);
  Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
  Path familyDir = new Path(regionSnapshotDir, "family");
  String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
  Path refFile = new Path(familyDir, hfile);
  fs.create(refFile);

  // Put a file of the same name in the archive directory.
  fs.mkdirs(archivedHfileDir);
  fs.createNewFile(new Path(archivedHfileDir, hfile));

  // The archived copy is referenced by the snapshot, so it must be retained.
  assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
}

Class: org.apache.hadoop.hbase.master.snapshot.TestSnapshotManager

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Covers the snapshot-support configuration matrix: no config -> disabled;
// HBASE_SNAPSHOT_ENABLED=true -> enabled; =false -> disabled (even when the
// snapshot/hfile-link cleaner plugins are configured); cleaners configured
// without the flag -> enabled. Finally, verifies that creating a manager
// with snapshots disabled while a completed snapshot dir exists on disk
// throws UnsupportedOperationException (the dir is removed in the finally
// block so later tests are unaffected).
/** * Verify the snapshot support based on the configuration. */ @Test public void testSnapshotSupportConfiguration() throws Exception { Configuration conf=new Configuration(); SnapshotManager manager=getNewManager(conf); assertFalse("Snapshot should be disabled with no configuration",isSnapshotSupported(manager)); conf=new Configuration(); conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED,true); manager=getNewManager(conf); assertTrue("Snapshot should be enabled",isSnapshotSupported(manager)); conf=new Configuration(); conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED,false); manager=getNewManager(conf); assertFalse("Snapshot should be disabled",isSnapshotSupported(manager)); conf=new Configuration(); conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,SnapshotHFileCleaner.class.getName(),HFileLinkCleaner.class.getName()); conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED,false); manager=getNewManager(conf); assertFalse("Snapshot should be disabled",isSnapshotSupported(manager)); conf=new Configuration(); conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,SnapshotHFileCleaner.class.getName(),HFileLinkCleaner.class.getName()); manager=getNewManager(conf); assertTrue("Snapshot should be enabled, because cleaners are present",isSnapshotSupported(manager)); Path rootDir=UTIL.getDataTestDir(); Path testSnapshotDir=SnapshotDescriptionUtils.getCompletedSnapshotDir("testSnapshotSupportConfiguration",rootDir); fs.mkdirs(testSnapshotDir); try { conf=new Configuration(); conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED,false); manager=getNewManager(conf); fail("Master should not start when snapshot is disabled, but snapshots are present"); } catch ( UnsupportedOperationException e) { } finally { fs.delete(testSnapshotDir,true); } }

InternalCallVerifier BooleanVerifier 
/**
 * isTakingSnapshot(table) must reflect the registered handler's state:
 * false with no handler, true while the handler is unfinished, false once
 * the handler reports finished.
 */
@Test
public void testInProcess() throws KeeperException, IOException {
  TableName tableName = TableName.valueOf("testTable");
  SnapshotManager manager = getNewManager();
  TakeSnapshotHandler handler = Mockito.mock(TakeSnapshotHandler.class);

  // No handler registered yet — nothing is in progress.
  assertFalse("Manager is in process when there is no current handler",
      manager.isTakingSnapshot(tableName));

  manager.setSnapshotHandlerForTesting(tableName, handler);

  // Unfinished handler — a snapshot is in progress.
  Mockito.when(handler.isFinished()).thenReturn(false);
  assertTrue("Manager isn't in process when handler is running",
      manager.isTakingSnapshot(tableName));

  // Finished handler — no longer in progress.
  Mockito.when(handler.isFinished()).thenReturn(true);
  assertFalse("Manager is process when handler isn't running",
      manager.isTakingSnapshot(tableName));
}

Class: org.apache.hadoop.hbase.metrics.TestBaseSourceImpl

InternalCallVerifier EqualityVerifier 
/** Incrementing the same counter twice must accumulate: 100 then 200. */
@Test
public void testIncCounters() throws Exception {
  bmsi.incCounters("testinccounter", 100);
  MutableCounterLong counter = (MutableCounterLong) bmsi.metricsRegistry.get("testinccounter");
  assertEquals(100, counter.value());

  bmsi.incCounters("testinccounter", 100);
  counter = (MutableCounterLong) bmsi.metricsRegistry.get("testinccounter");
  assertEquals(200, counter.value());
}

InternalCallVerifier EqualityVerifier 
/** Setting a gauge overwrites the previous value: 100 then 300. */
@Test
public void testSetGauge() throws Exception {
  bmsi.setGauge("testset", 100);
  MutableGaugeLong gauge = (MutableGaugeLong) bmsi.metricsRegistry.get("testset");
  assertEquals(100, gauge.value());

  bmsi.setGauge("testset", 300);
  gauge = (MutableGaugeLong) bmsi.metricsRegistry.get("testset");
  assertEquals(300, gauge.value());
}

InternalCallVerifier NullVerifier 
/** Removing a metric makes subsequent registry lookups return null. */
@Test
public void testRemoveMetric() throws Exception {
  // Create the gauge, remove it, and confirm it is gone from the registry.
  bmsi.setGauge("testrmgauge", 100);
  bmsi.removeMetric("testrmgauge");
  assertNull(bmsi.metricsRegistry.get("testrmgauge"));
}

InternalCallVerifier EqualityVerifier 
/** Incrementing the same gauge twice must accumulate: 100 then 200. */
@Test
public void testIncGauge() throws Exception {
  bmsi.incGauge("testincgauge", 100);
  MutableGaugeLong gauge = (MutableGaugeLong) bmsi.metricsRegistry.get("testincgauge");
  assertEquals(100, gauge.value());

  bmsi.incGauge("testincgauge", 100);
  gauge = (MutableGaugeLong) bmsi.metricsRegistry.get("testincgauge");
  assertEquals(200, gauge.value());
}

InternalCallVerifier EqualityVerifier 
/** Decrementing the same gauge twice must accumulate: -100 then -200. */
@Test
public void testDecGauge() throws Exception {
  bmsi.decGauge("testdec", 100);
  MutableGaugeLong gauge = (MutableGaugeLong) bmsi.metricsRegistry.get("testdec");
  assertEquals(-100, gauge.value());

  bmsi.decGauge("testdec", 100);
  gauge = (MutableGaugeLong) bmsi.metricsRegistry.get("testdec");
  assertEquals(-200, gauge.value());
}

Class: org.apache.hadoop.hbase.mob.TestCachedMobFile

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * The reference count of a CachedMobFile must track open/close pairs:
 * 0 -> 1 -> 2 on two opens, then back 2 -> 1 -> 0 on two closes.
 */
@Test
public void testOpenClose() throws Exception {
  String caseName = getName();
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);

  // Write a small store file to back the mob file.
  HFileContext context = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFile.Writer sfWriter = new StoreFile.WriterBuilder(conf, cacheConf, fs)
      .withOutputDir(testDir).withFileContext(context).build();
  MobTestUtil.writeStoreFile(sfWriter, caseName);

  CachedMobFile mobFile = CachedMobFile.create(fs, sfWriter.getPath(), conf, cacheConf);
  Assert.assertEquals(EXPECTED_REFERENCE_ZERO, mobFile.getReferenceCount());
  mobFile.open();
  Assert.assertEquals(EXPECTED_REFERENCE_ONE, mobFile.getReferenceCount());
  mobFile.open();
  Assert.assertEquals(EXPECTED_REFERENCE_TWO, mobFile.getReferenceCount());
  mobFile.close();
  Assert.assertEquals(EXPECTED_REFERENCE_ONE, mobFile.getReferenceCount());
  mobFile.close();
  Assert.assertEquals(EXPECTED_REFERENCE_ZERO, mobFile.getReferenceCount());
}

APIUtilityVerifier InternalCallVerifier NullVerifier 
@Test
public void testReadKeyValue() throws Exception {
  // Writes a store file with rows "aa".."zz", wraps it in a CachedMobFile and
  // verifies seeks for: the first row, the last row, a random in-range row,
  // a key below the range (lands on the first row) and a key past the range
  // (finds nothing). The repeated seek/assert sequence of the original is
  // factored into assertSeekFindsRow.
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
      .withOutputDir(testDir)
      .withFileContext(meta)
      .build();
  String caseName = getName();
  MobTestUtil.writeStoreFile(writer, caseName);
  CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf);
  byte[] family = Bytes.toBytes(caseName);
  byte[] qualify = Bytes.toBytes(caseName);
  byte[] startKey = Bytes.toBytes("aa");
  byte[] endKey = Bytes.toBytes("zz");

  // Exact-match seeks.
  assertSeekFindsRow(cachedMobFile, family, qualify, startKey, startKey);
  assertSeekFindsRow(cachedMobFile, family, qualify, endKey, endKey);
  byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
  assertSeekFindsRow(cachedMobFile, family, qualify, randomKey, randomKey);

  // A key sorting before the first row must land on the first row.
  byte[] lowerKey = Bytes.toBytes("a1");
  assertSeekFindsRow(cachedMobFile, family, qualify, lowerKey, startKey);

  // A key sorting after the last row must find nothing.
  byte[] upperKey = Bytes.toBytes("z{");
  KeyValue seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
  Assert.assertNull(cachedMobFile.readCell(seekKey, false));
}

/**
 * Seeks {@code file} at row {@code seekRow} and asserts the returned cell
 * matches a Put KeyValue on row {@code expectedRow}.
 */
private void assertSeekFindsRow(CachedMobFile file, byte[] family, byte[] qualify,
    byte[] seekRow, byte[] expectedRow) throws Exception {
  KeyValue expectedKey =
      new KeyValue(expectedRow, family, qualify, Long.MAX_VALUE, Type.Put, expectedRow);
  KeyValue seekKey = new KeyValue(seekRow, family, qualify, Long.MAX_VALUE, Type.Put, seekRow);
  Cell cell = file.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testCompare() throws Exception {
  // Creates two cached MOB files in different family directories, gives the
  // second a higher access count, and checks compareTo orders by access count
  // (more-recently/heavily accessed sorts first) and is zero on itself.
  String caseName = getName();
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  Path outputDir1 = new Path(testDir, FAMILY1);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFile.Writer writer1 = new StoreFile.WriterBuilder(conf, cacheConf, fs)
      .withOutputDir(outputDir1)
      .withFileContext(meta)
      .build();
  MobTestUtil.writeStoreFile(writer1, caseName);
  CachedMobFile cachedMobFile1 = CachedMobFile.create(fs, writer1.getPath(), conf, cacheConf);
  Path outputDir2 = new Path(testDir, FAMILY2);
  StoreFile.Writer writer2 = new StoreFile.WriterBuilder(conf, cacheConf, fs)
      .withOutputDir(outputDir2)
      .withFileContext(meta)
      .build();
  MobTestUtil.writeStoreFile(writer2, caseName);
  CachedMobFile cachedMobFile2 = CachedMobFile.create(fs, writer2.getPath(), conf, cacheConf);
  cachedMobFile1.access(1);
  cachedMobFile2.access(2);
  // JUnit convention: expected value first, actual second (the original had
  // the arguments reversed, which garbles failure messages).
  Assert.assertEquals(1, cachedMobFile1.compareTo(cachedMobFile2));
  Assert.assertEquals(-1, cachedMobFile2.compareTo(cachedMobFile1));
  Assert.assertEquals(0, cachedMobFile1.compareTo(cachedMobFile1));
}

Class: org.apache.hadoop.hbase.mob.TestExpiredMobFileCleaner

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Creates a 3 day old hfile and an 1 day old hfile then sets expiry to 2 days. * Verifies that the 3 day old hfile is removed but the 1 day one is still present * after the expiry based cleaner is run. */
// Flow: write row1 with a 3-day-old timestamp (flush -> 1 mob file), write row2
// with a 1-day-old timestamp (flush -> 2 mob files), set the family TTL to
// 2 days, then run ExpiredMobFileCleaner via ToolRunner and expect only the
// newer file to survive. secondFile is identified by name-diffing against the
// file present before the second flush.
// NOTE(review): filesAfterClean[0] is dereferenced before the length-1 assert,
// so an empty directory would surface as ArrayIndexOutOfBounds rather than an
// assertion failure — confirm this ordering is intentional.
@Test public void testCleaner() throws Exception { init(); Path mobDirPath=MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(),tableName,family); byte[] dummyData=makeDummyData(600); long ts=System.currentTimeMillis() - 3 * secondsOfDay() * 1000; putKVAndFlush(table,row1,dummyData,ts); FileStatus[] firstFiles=TEST_UTIL.getTestFileSystem().listStatus(mobDirPath); assertEquals("Before cleanup without delay 1",1,firstFiles.length); String firstFile=firstFiles[0].getPath().getName(); ts=System.currentTimeMillis() - 1 * secondsOfDay() * 1000; putKVAndFlush(table,row2,dummyData,ts); FileStatus[] secondFiles=TEST_UTIL.getTestFileSystem().listStatus(mobDirPath); assertEquals("Before cleanup without delay 2",2,secondFiles.length); String f1=secondFiles[0].getPath().getName(); String f2=secondFiles[1].getPath().getName(); String secondFile=f1.equals(firstFile) ? f2 : f1; modifyColumnExpiryDays(2); String[] args=new String[2]; args[0]=tableName.getNameAsString(); args[1]=family; ToolRunner.run(TEST_UTIL.getConfiguration(),new ExpiredMobFileCleaner(),args); FileStatus[] filesAfterClean=TEST_UTIL.getTestFileSystem().listStatus(mobDirPath); String lastFile=filesAfterClean[0].getPath().getName(); assertEquals("After cleanup without delay 1",1,filesAfterClean.length); assertEquals("After cleanup without delay 2",secondFile,lastFile); }

Class: org.apache.hadoop.hbase.mob.TestMobFile

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test
public void testGetScanner() throws Exception {
  // A MobFile wrapped around a freshly written store file must hand out a
  // non-null scanner, and that scanner must be a StoreFileScanner.
  final Path dir = TEST_UTIL.getDataTestDir();
  final FileSystem fs = dir.getFileSystem(conf);
  final HFileContext fileContext = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  final StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
      .withOutputDir(dir)
      .withFileContext(fileContext)
      .build();
  MobTestUtil.writeStoreFile(writer, getName());
  final StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
  final MobFile mobFile = new MobFile(storeFile);
  assertNotNull(mobFile.getScanner());
  assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}

APIUtilityVerifier InternalCallVerifier NullVerifier 
@Test
public void testReadKeyValue() throws Exception {
  // Writes a store file with rows "aa".."zz", wraps it in a MobFile and
  // verifies seeks for: the first row, the last row, a random in-range row,
  // a key below the range (lands on the first row) and a key past the range
  // (finds nothing). The repeated seek/assert sequence of the original is
  // factored into assertSeekFindsRow.
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
      .withOutputDir(testDir)
      .withFileContext(meta)
      .build();
  String caseName = getName();
  MobTestUtil.writeStoreFile(writer, caseName);
  MobFile mobFile =
      new MobFile(new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE));
  byte[] family = Bytes.toBytes(caseName);
  byte[] qualify = Bytes.toBytes(caseName);
  byte[] startKey = Bytes.toBytes("aa");
  byte[] endKey = Bytes.toBytes("zz");

  // Exact-match seeks.
  assertSeekFindsRow(mobFile, family, qualify, startKey, startKey);
  assertSeekFindsRow(mobFile, family, qualify, endKey, endKey);
  byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
  assertSeekFindsRow(mobFile, family, qualify, randomKey, randomKey);

  // A key sorting before the first row must land on the first row.
  byte[] lowerKey = Bytes.toBytes("a1");
  assertSeekFindsRow(mobFile, family, qualify, lowerKey, startKey);

  // A key sorting after the last row must find nothing.
  byte[] upperKey = Bytes.toBytes("z{");
  KeyValue seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
  assertNull(mobFile.readCell(seekKey, false));
}

/**
 * Seeks {@code file} at row {@code seekRow} and asserts the returned cell
 * matches a Put KeyValue on row {@code expectedRow}.
 */
private void assertSeekFindsRow(MobFile file, byte[] family, byte[] qualify,
    byte[] seekRow, byte[] expectedRow) throws Exception {
  KeyValue expectedKey =
      new KeyValue(expectedRow, family, qualify, Long.MAX_VALUE, Type.Put, expectedRow);
  KeyValue seekKey = new KeyValue(seekRow, family, qualify, Long.MAX_VALUE, Type.Put, seekRow);
  Cell cell = file.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
}

Class: org.apache.hadoop.hbase.mob.TestMobFileCache

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Exercises MobFileCache with a small configured capacity (TEST_CACHE_SIZE):
//  - openFile() inserts an entry (cache size grows) and each cached handle
//    carries reference count 2 (one for the cache, one for the caller);
//  - evict() trims the cache down to the configured size, dropping the cache's
//    reference on evicted entries (their count falls to 1);
//  - evictFile(name) removes a specific entry and decrements its count;
//  - after eviction the most recently opened file (file3) keeps count 2.
@Test public void testMobFileCache() throws Exception { FileSystem fs=FileSystem.get(conf); conf.set(MobConstants.MOB_FILE_CACHE_SIZE_KEY,TEST_CACHE_SIZE); mobFileCache=new MobFileCache(conf); Path file1Path=createMobStoreFile(FAMILY1); Path file2Path=createMobStoreFile(FAMILY2); Path file3Path=createMobStoreFile(FAMILY3); assertEquals(EXPECTED_CACHE_SIZE_ZERO,mobFileCache.getCacheSize()); CachedMobFile cachedMobFile1=(CachedMobFile)mobFileCache.openFile(fs,file1Path,mobCacheConf); assertEquals(EXPECTED_CACHE_SIZE_ONE,mobFileCache.getCacheSize()); assertNotNull(cachedMobFile1); assertEquals(EXPECTED_REFERENCE_TWO,cachedMobFile1.getReferenceCount()); mobFileCache.evict(); assertEquals(EXPECTED_CACHE_SIZE_ONE,mobFileCache.getCacheSize()); assertEquals(EXPECTED_REFERENCE_TWO,cachedMobFile1.getReferenceCount()); mobFileCache.evictFile(file1Path.getName()); assertEquals(EXPECTED_CACHE_SIZE_ZERO,mobFileCache.getCacheSize()); assertEquals(EXPECTED_REFERENCE_ONE,cachedMobFile1.getReferenceCount()); cachedMobFile1.close(); cachedMobFile1=(CachedMobFile)mobFileCache.openFile(fs,file1Path,mobCacheConf); assertEquals(EXPECTED_CACHE_SIZE_ONE,mobFileCache.getCacheSize()); CachedMobFile cachedMobFile2=(CachedMobFile)mobFileCache.openFile(fs,file2Path,mobCacheConf); assertEquals(EXPECTED_CACHE_SIZE_TWO,mobFileCache.getCacheSize()); CachedMobFile cachedMobFile3=(CachedMobFile)mobFileCache.openFile(fs,file3Path,mobCacheConf); assertEquals(EXPECTED_CACHE_SIZE_THREE,mobFileCache.getCacheSize()); assertEquals(EXPECTED_REFERENCE_TWO,cachedMobFile1.getReferenceCount()); assertEquals(EXPECTED_REFERENCE_TWO,cachedMobFile2.getReferenceCount()); assertEquals(EXPECTED_REFERENCE_TWO,cachedMobFile3.getReferenceCount()); mobFileCache.evict(); assertEquals(EXPECTED_CACHE_SIZE_ONE,mobFileCache.getCacheSize()); assertEquals(EXPECTED_REFERENCE_ONE,cachedMobFile1.getReferenceCount()); assertEquals(EXPECTED_REFERENCE_ONE,cachedMobFile2.getReferenceCount()); assertEquals(EXPECTED_REFERENCE_TWO,cachedMobFile3.getReferenceCount()); }

Class: org.apache.hadoop.hbase.mob.TestMobFileLink

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testMobFilePath() throws IOException {
  // Builds an HFileLink from a "table=region-file" link name under the MOB
  // region and checks its three resolved locations: the MOB family dir, the
  // origin region dir, and the archive dir.
  TableName tableName = TableName.valueOf("testMobFilePath");
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);
  Path rootDir = FSUtils.getRootDir(conf);
  Path tableDir = FSUtils.getTableDir(rootDir, tableName);
  Path archiveDir = FSUtils.getTableDir(HFileArchiveUtil.getArchivePath(conf), tableName);
  String fileName = "mobFile";
  String encodedRegionName = MobUtils.getMobRegionInfo(tableName).getEncodedName();
  String columnFamily = "columnFamily";

  // Expected locations, qualified against the test filesystem.
  Path regionDir = new Path(tableDir, encodedRegionName);
  Path archivedRegionDir = new Path(archiveDir, encodedRegionName);
  Path expectedMobFilePath =
      new Path(MobUtils.getMobFamilyPath(conf, tableName, columnFamily), fileName)
          .makeQualified(fs.getUri(), fs.getWorkingDirectory());
  Path expectedOriginPath = new Path(new Path(regionDir, columnFamily), fileName)
      .makeQualified(fs.getUri(), fs.getWorkingDirectory());
  Path expectedArchivePath = new Path(new Path(archivedRegionDir, columnFamily), fileName)
      .makeQualified(fs.getUri(), fs.getWorkingDirectory());

  String hfileLinkName =
      tableName.getNameAsString() + "=" + encodedRegionName + "-" + fileName;
  Path hfileLinkPath = new Path(columnFamily, hfileLinkName);
  HFileLink hfileLink = HFileLink.buildFromHFileLinkPattern(conf, hfileLinkPath);
  Assert.assertEquals(expectedMobFilePath, hfileLink.getMobPath());
  Assert.assertEquals(expectedOriginPath, hfileLink.getOriginPath());
  Assert.assertEquals(expectedArchivePath, hfileLink.getArchivePath());
}

Class: org.apache.hadoop.hbase.mob.TestMobFileName

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test
public void testEquals() {
  MobFileName name = MobFileName.create(startKey, dateStr, uuid);
  // Reflexive: equal to itself.
  assertTrue(name.equals(name));
  // Not equal to an object of an unrelated type.
  assertFalse(name.equals(this));
  // Equal to a second instance built from the same components.
  assertTrue(name.equals(MobFileName.create(startKey, dateStr, uuid)));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testGet() {
  // The accessors must reflect the creation parameters: the start key is
  // stored as its MD5 hex digest, and the file name is digest + date + uuid.
  MobFileName mobFileName = MobFileName.create(startKey, dateStr, uuid);
  assertEquals(MD5Hash.getMD5AsHex(startKey, 0, startKey.length), mobFileName.getStartKey());
  assertEquals(dateStr, mobFileName.getDate());
  // JUnit convention: expected value first, actual second (the original had
  // the arguments reversed on this assertion).
  assertEquals(MD5Hash.getMD5AsHex(startKey, 0, startKey.length) + dateStr + uuid,
      mobFileName.getFileName());
}

Class: org.apache.hadoop.hbase.mob.compactions.TestMobCompactor

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Loads MOB data into two families — family1 encrypted with a wrapped AES key,
// family2 plain — deletes qualifier qf1 from family1, then triggers a MOB-type
// major compaction via the Admin API for family1 only. Verifies:
//  - row/cell/file counts before deletion, after deletion, and after compaction;
//  - family1's mob files collapse to one per region and its del files are
//    removed, while family2's file counts are untouched;
//  - the compacted family1 data is still encrypted (verifyEncryption).
@Test(timeout=300000) public void testMajorCompactionFromAdmin() throws Exception { resetConf(); int mergeSize=5000; conf.setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD,mergeSize); String tableNameAsString="testMajorCompactionFromAdmin"; SecureRandom rng=new SecureRandom(); byte[] keyBytes=new byte[AES.KEY_LENGTH]; rng.nextBytes(keyBytes); String algorithm=conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY,HConstants.CIPHER_AES); Key cfKey=new SecretKeySpec(keyBytes,algorithm); byte[] encryptionKey=EncryptionUtil.wrapKey(conf,conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY,User.getCurrent().getShortName()),cfKey); TableName tableName=TableName.valueOf(tableNameAsString); HTableDescriptor desc=new HTableDescriptor(tableName); HColumnDescriptor hcd1=new HColumnDescriptor(family1); hcd1.setMobEnabled(true); hcd1.setMobThreshold(0); hcd1.setEncryptionType(algorithm); hcd1.setEncryptionKey(encryptionKey); HColumnDescriptor hcd2=new HColumnDescriptor(family2); hcd2.setMobEnabled(true); hcd2.setMobThreshold(0); desc.addFamily(hcd1); desc.addFamily(hcd2); admin.createTable(desc,getSplitKeys()); Table table=conn.getTable(tableName); BufferedMutator bufMut=conn.getBufferedMutator(tableName); int count=4; loadData(admin,bufMut,tableName,count,rowNumPerFile); int rowNumPerRegion=count * rowNumPerFile; assertEquals("Before deleting: mob rows count",regionNum * rowNumPerRegion,countMobRows(table)); assertEquals("Before deleting: mob cells count",regionNum * cellNumPerRow * rowNumPerRegion,countMobCells(table)); assertEquals("Before deleting: mob file count",regionNum * count,countFiles(tableName,true,family1)); createDelFile(table,tableName,Bytes.toBytes(family1),Bytes.toBytes(qf1)); assertEquals("Before compaction: mob rows count",regionNum * (rowNumPerRegion - delRowNum),countMobRows(table)); assertEquals("Before compaction: mob cells count",regionNum * (cellNumPerRow * rowNumPerRegion - delCellNum),countMobCells(table)); assertEquals("Before compaction: family1 mob file count",regionNum * count,countFiles(tableName,true,family1)); assertEquals("Before compaction: family2 mob file count",regionNum * count,countFiles(tableName,true,family2)); assertEquals("Before compaction: family1 del file count",regionNum,countFiles(tableName,false,family1)); assertEquals("Before compaction: family2 del file count",regionNum,countFiles(tableName,false,family2)); admin.majorCompact(tableName,hcd1.getName(),Admin.CompactType.MOB); waitUntilMobCompactionFinished(tableName); assertEquals("After compaction: mob rows count",regionNum * (rowNumPerRegion - delRowNum),countMobRows(table)); assertEquals("After compaction: mob cells count",regionNum * (cellNumPerRow * rowNumPerRegion - delCellNum),countMobCells(table)); assertEquals("After compaction: family1 mob file count",regionNum,countFiles(tableName,true,family1)); assertEquals("After compaction: family2 mob file count",regionNum * count,countFiles(tableName,true,family2)); assertEquals("After compaction: family1 del file count",0,countFiles(tableName,false,family1)); assertEquals("After compaction: family2 del file count",regionNum,countFiles(tableName,false,family2)); Assert.assertTrue(verifyEncryption(tableName,family1)); table.close(); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Writes the same cell (row k1, same timestamp ts) twice with different values
// ("mobValue" then "new"), major-compacts the table and then its MOB files,
// and verifies a Get afterwards returns the newest value "new" — i.e. the
// duplicate-timestamp overwrite survives both compactions.
@Test(timeout=300000) public void testScannerAfterCompactions() throws Exception { resetConf(); setUp("testScannerAfterCompactions"); long ts=EnvironmentEdgeManager.currentTime(); byte[] key0=Bytes.toBytes("k0"); byte[] key1=Bytes.toBytes("k1"); String value="mobValue"; String newValue="new"; Put put0=new Put(key0); put0.addColumn(Bytes.toBytes(family1),Bytes.toBytes(qf1),ts,Bytes.toBytes(value)); loadData(admin,bufMut,tableName,new Put[]{put0}); Put put1=new Put(key1); put1.addColumn(Bytes.toBytes(family1),Bytes.toBytes(qf1),ts,Bytes.toBytes(value)); loadData(admin,bufMut,tableName,new Put[]{put1}); put1=new Put(key1); put1.addColumn(Bytes.toBytes(family1),Bytes.toBytes(qf1),ts,Bytes.toBytes(newValue)); loadData(admin,bufMut,tableName,new Put[]{put1}); admin.majorCompact(tableName); waitUntilCompactionFinished(tableName); admin.majorCompact(tableName,hcd1.getName(),Admin.CompactType.MOB); waitUntilMobCompactionFinished(tableName); Get get=new Get(key1); Result result=table.get(get); Cell cell=result.getColumnLatestCell(hcd1.getName(),Bytes.toBytes(qf1)); assertEquals("After compaction: mob value","new",Bytes.toString(CellUtil.cloneValue(cell))); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Overwrites k0 at the same timestamp across two load batches, then verifies:
//  - the overwritten value is returned both before and after MOB compaction;
//  - a raw MOB scan (MOB_SCAN_RAW) of k1 yields a reference cell whose file
//    name matches the single remaining non-del mob file in the family dir.
@Test(timeout=300000) public void testScannerOnBulkLoadRefHFiles() throws Exception { resetConf(); setUp("testScannerOnBulkLoadRefHFiles"); long ts=EnvironmentEdgeManager.currentTime(); byte[] key0=Bytes.toBytes("k0"); byte[] key1=Bytes.toBytes("k1"); String value0="mobValue0"; String value1="mobValue1"; String newValue0="new"; Put put0=new Put(key0); put0.addColumn(Bytes.toBytes(family1),Bytes.toBytes(qf1),ts,Bytes.toBytes(value0)); loadData(admin,bufMut,tableName,new Put[]{put0}); put0=new Put(key0); put0.addColumn(Bytes.toBytes(family1),Bytes.toBytes(qf1),ts,Bytes.toBytes(newValue0)); Put put1=new Put(key1); put1.addColumn(Bytes.toBytes(family1),Bytes.toBytes(qf1),ts,Bytes.toBytes(value1)); loadData(admin,bufMut,tableName,new Put[]{put0,put1}); Get get=new Get(key0); Result result=table.get(get); Cell cell=result.getColumnLatestCell(hcd1.getName(),Bytes.toBytes(qf1)); assertEquals("Before compaction: mob value of k0",newValue0,Bytes.toString(CellUtil.cloneValue(cell))); admin.majorCompact(tableName,hcd1.getName(),Admin.CompactType.MOB); waitUntilMobCompactionFinished(tableName); result=table.get(get); cell=result.getColumnLatestCell(hcd1.getName(),Bytes.toBytes(qf1)); assertEquals("After compaction: mob value of k0",newValue0,Bytes.toString(CellUtil.cloneValue(cell))); get=new Get(key1); get.setAttribute(MobConstants.MOB_SCAN_RAW,Bytes.toBytes(true)); result=table.get(get); cell=result.getColumnLatestCell(hcd1.getName(),Bytes.toBytes(qf1)); Path mobFamilyPath=MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(),tableName,hcd1.getNameAsString()); List paths=new ArrayList(); if (fs.exists(mobFamilyPath)) { FileStatus[] files=fs.listStatus(mobFamilyPath); for ( FileStatus file : files) { if (!StoreFileInfo.isDelFile(file.getPath())) { paths.add(file.getPath()); } } } assertEquals("After compaction: number of mob files:",1,paths.size()); assertEquals("After compaction: mob file name:",MobUtils.getMobFileName(cell),paths.get(0).getName()); }

Class: org.apache.hadoop.hbase.mob.compactions.TestPartitionedMobCompactionRequest

InternalCallVerifier EqualityVerifier 
@Test
public void testCompactedPartition() {
  // A file added to a compaction partition must come back from listFiles().
  CompactionPartition partition =
      new CompactionPartition(new CompactionPartitionId("startKey1", "date1"));
  FileStatus file = new FileStatus(1, false, 1, 1024, 1, new Path("/test"));
  partition.addFile(file);
  Assert.assertEquals(file, partition.listFiles().get(0));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testCompactedPartitionId() {
  String startKey1 = "startKey1";
  String startKey2 = "startKey2";
  String date1 = "date1";
  String date2 = "date2";
  CompactionPartitionId id1 = new CompactionPartitionId(startKey1, date1);
  CompactionPartitionId id2 = new CompactionPartitionId(startKey2, date2);
  CompactionPartitionId id3 = new CompactionPartitionId(startKey1, date2);
  // equals(): reflexive, and false whenever the start key or the date differs.
  Assert.assertTrue(id1.equals(id1));
  Assert.assertFalse(id1.equals(id2));
  Assert.assertFalse(id1.equals(id3));
  Assert.assertFalse(id2.equals(id3));
  // Accessors echo the constructor arguments back.
  Assert.assertEquals(startKey1, id1.getStartKey());
  Assert.assertEquals(date1, id1.getDate());
}

Class: org.apache.hadoop.hbase.mob.mapreduce.TestMobSweepJob

InternalCallVerifier EqualityVerifier 
@Test
public void testSweeperJobWithUnusedFile() throws Exception {
  // All-files list is {1..6}; the visited lists from r0..r2 cover {1,2,3,5},
  // so the sweeper must report exactly {4, 6} as unused.
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Configuration configuration = new Configuration(TEST_UTIL.getConfiguration());
  // Local renamed from the original's misspelled "vistiedFileNamesPath".
  Path visitedFileNamesPath = new Path(MobUtils.getMobHome(configuration),
      "/hbase/mobcompaction/SweepJob/working/names/1/visited");
  Path allFileNamesPath = new Path(MobUtils.getMobHome(configuration),
      "/hbase/mobcompaction/SweepJob/working/names/1/all");
  configuration.set(SweepJob.WORKING_VISITED_DIR_KEY, visitedFileNamesPath.toString());
  configuration.set(SweepJob.WORKING_ALLNAMES_FILE_KEY, allFileNamesPath.toString());
  writeFileNames(fs, configuration, allFileNamesPath,
      new String[] { "1", "2", "3", "4", "5", "6" });
  Path r0 = new Path(visitedFileNamesPath, "r0");
  writeFileNames(fs, configuration, r0, new String[] { "1", "2", "3" });
  Path r1 = new Path(visitedFileNamesPath, "r1");
  writeFileNames(fs, configuration, r1, new String[] { "1", "5" });
  Path r2 = new Path(visitedFileNamesPath, "r2");
  writeFileNames(fs, configuration, r2, new String[] { "2", "3" });
  SweepJob sweepJob = new SweepJob(configuration, fs);
  // Parameterized list instead of the original raw List; toArray(new String[0])
  // below already required the elements to be Strings.
  List<String> toBeArchived = sweepJob.getUnusedFiles(configuration);
  assertEquals(2, toBeArchived.size());
  assertArrayEquals(new String[] { "4", "6" }, toBeArchived.toArray(new String[0]));
}

InternalCallVerifier EqualityVerifier 
@Test
public void testSweeperJobWithOutUnusedFile() throws Exception {
  // The visited lists from r0..r2 together cover every name in the all-files
  // list {1..6}, so no file may be reported as unused.
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Configuration configuration = new Configuration(TEST_UTIL.getConfiguration());
  // Local renamed from the original's misspelled "vistiedFileNamesPath".
  Path visitedFileNamesPath = new Path(MobUtils.getMobHome(configuration),
      "/hbase/mobcompaction/SweepJob/working/names/0/visited");
  Path allFileNamesPath = new Path(MobUtils.getMobHome(configuration),
      "/hbase/mobcompaction/SweepJob/working/names/0/all");
  configuration.set(SweepJob.WORKING_VISITED_DIR_KEY, visitedFileNamesPath.toString());
  configuration.set(SweepJob.WORKING_ALLNAMES_FILE_KEY, allFileNamesPath.toString());
  writeFileNames(fs, configuration, allFileNamesPath,
      new String[] { "1", "2", "3", "4", "5", "6" });
  Path r0 = new Path(visitedFileNamesPath, "r0");
  writeFileNames(fs, configuration, r0, new String[] { "1", "2", "3" });
  Path r1 = new Path(visitedFileNamesPath, "r1");
  writeFileNames(fs, configuration, r1, new String[] { "1", "4", "5" });
  Path r2 = new Path(visitedFileNamesPath, "r2");
  writeFileNames(fs, configuration, r2, new String[] { "2", "3", "6" });
  SweepJob sweepJob = new SweepJob(configuration, fs);
  // Parameterized list instead of the original raw List.
  List<String> toBeArchived = sweepJob.getUnusedFiles(configuration);
  assertEquals(0, toBeArchived.size());
}

InternalCallVerifier EqualityVerifier 
@Test
public void testSweeperJobWithRedundantFile() throws Exception {
  // The visited lists cover all of {1..6} and additionally a name "7" that is
  // not in the all-files list; the extra name must be ignored and nothing
  // reported as unused.
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Configuration configuration = new Configuration(TEST_UTIL.getConfiguration());
  // Local renamed from the original's misspelled "vistiedFileNamesPath".
  Path visitedFileNamesPath = new Path(MobUtils.getMobHome(configuration),
      "/hbase/mobcompaction/SweepJob/working/names/2/visited");
  Path allFileNamesPath = new Path(MobUtils.getMobHome(configuration),
      "/hbase/mobcompaction/SweepJob/working/names/2/all");
  configuration.set(SweepJob.WORKING_VISITED_DIR_KEY, visitedFileNamesPath.toString());
  configuration.set(SweepJob.WORKING_ALLNAMES_FILE_KEY, allFileNamesPath.toString());
  writeFileNames(fs, configuration, allFileNamesPath,
      new String[] { "1", "2", "3", "4", "5", "6" });
  Path r0 = new Path(visitedFileNamesPath, "r0");
  writeFileNames(fs, configuration, r0, new String[] { "1", "2", "3" });
  Path r1 = new Path(visitedFileNamesPath, "r1");
  writeFileNames(fs, configuration, r1, new String[] { "1", "5", "6", "7" });
  Path r2 = new Path(visitedFileNamesPath, "r2");
  writeFileNames(fs, configuration, r2, new String[] { "2", "3", "4" });
  SweepJob sweepJob = new SweepJob(configuration, fs);
  // Parameterized list instead of the original raw List.
  List<String> toBeArchived = sweepJob.getUnusedFiles(configuration);
  assertEquals(0, toBeArchived.size());
}

Class: org.apache.hadoop.hbase.mob.mapreduce.TestMobSweepMapper

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Drives SweepMapper.map() with a mocked Result containing one mob reference
// cell, while holding the sweep table write lock (the mapper requires it).
// The mocked Mapper.Context's write() answer asserts the mapper emits the mob
// file name with its 4-char prefix stripped, and a KeyValue whose key equals
// the input cell's key.
// NOTE(review): the method name "TestMap" breaks the lowerCamelCase test-naming
// convention; JUnit still runs it via @Test, so the name is left unchanged here.
@Test public void TestMap() throws Exception { String prefix="0000"; final String fileName="19691231f2cd014ea28f42788214560a21a44cef"; final String mobFilePath=prefix + fileName; ImmutableBytesWritable r=new ImmutableBytesWritable(Bytes.toBytes("r")); final KeyValue[] kvList=new KeyValue[1]; kvList[0]=new KeyValue(Bytes.toBytes("row"),Bytes.toBytes("family"),Bytes.toBytes("column"),Bytes.toBytes(mobFilePath)); Result columns=mock(Result.class); when(columns.rawCells()).thenReturn(kvList); Configuration configuration=new Configuration(TEST_UTIL.getConfiguration()); ZooKeeperWatcher zkw=new ZooKeeperWatcher(configuration,"1",new DummyMobAbortable()); TableName tn=TableName.valueOf("testSweepMapper"); TableName lockName=MobUtils.getTableLockName(tn); String znode=ZKUtil.joinZNode(zkw.tableLockZNode,lockName.getNameAsString()); configuration.set(SweepJob.SWEEP_JOB_ID,"1"); configuration.set(SweepJob.SWEEP_JOB_TABLE_NODE,znode); ServerName serverName=SweepJob.getCurrentServerName(configuration); configuration.set(SweepJob.SWEEP_JOB_SERVERNAME,serverName.toString()); TableLockManager tableLockManager=TableLockManager.createTableLockManager(configuration,zkw,serverName); TableLock lock=tableLockManager.writeLock(lockName,"Run sweep tool"); lock.acquire(); try { Mapper.Context ctx=mock(Mapper.Context.class); when(ctx.getConfiguration()).thenReturn(configuration); SweepMapper map=new SweepMapper(); doAnswer(new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { Text text=(Text)invocation.getArguments()[0]; KeyValue kv=(KeyValue)invocation.getArguments()[1]; assertEquals(Bytes.toString(text.getBytes(),0,text.getLength()),fileName); assertEquals(0,Bytes.compareTo(kv.getKey(),kvList[0].getKey())); return null; } } ).when(ctx).write(any(Text.class),any(KeyValue.class)); map.map(r,columns,ctx); } finally { lock.release(); } }

Class: org.apache.hadoop.hbase.mob.mapreduce.TestMobSweepReducer

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// End-to-end SweepReducer run: writes one mob cell plus a to-be-ignored row,
// flushes so exactly one mob file exists, then mocks a Reducer.Context that
// feeds that file name back in and runs SweepReducer under the table write
// lock. Afterwards verifies the mob family dir still holds exactly one file
// but with a NEW name (the file was rewritten), and that the visited-files
// working dir records the OLD file name.
// NOTE(review): WORKING_VISITED_DIR_KEY is assigned twice — first
// "jobWorkingNamesDir" via setStrings, then "compactionVisitedDir" via set;
// the second assignment wins. Confirm the first is not a leftover.
@Test public void testRun() throws Exception { TableName tn=TableName.valueOf(tableName); byte[] mobValueBytes=new byte[100]; Path mobFamilyPath=MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(),tn,family); Put put=new Put(Bytes.toBytes(row)); put.addColumn(Bytes.toBytes(family),Bytes.toBytes(qf),1,mobValueBytes); Put put2=new Put(Bytes.toBytes(row + "ignore")); put2.addColumn(Bytes.toBytes(family),Bytes.toBytes(qf),1,mobValueBytes); table.mutate(put); table.mutate(put2); table.flush(); admin.flush(tn); FileStatus[] fileStatuses=TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath); assertEquals(1,fileStatuses.length); String mobFile1=fileStatuses[0].getPath().getName(); Configuration configuration=new Configuration(TEST_UTIL.getConfiguration()); configuration.setFloat(MobConstants.MOB_SWEEP_TOOL_COMPACTION_RATIO,0.6f); configuration.setStrings(TableInputFormat.INPUT_TABLE,tableName); configuration.setStrings(TableInputFormat.SCAN_COLUMN_FAMILY,family); configuration.setStrings(SweepJob.WORKING_VISITED_DIR_KEY,"jobWorkingNamesDir"); configuration.setStrings(SweepJob.WORKING_FILES_DIR_KEY,"compactionFileDir"); configuration.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,JavaSerialization.class.getName()); configuration.set(SweepJob.WORKING_VISITED_DIR_KEY,"compactionVisitedDir"); configuration.setLong(MobConstants.MOB_SWEEP_TOOL_COMPACTION_START_DATE,System.currentTimeMillis() + 24 * 3600 * 1000); ZooKeeperWatcher zkw=new ZooKeeperWatcher(configuration,"1",new DummyMobAbortable()); TableName lockName=MobUtils.getTableLockName(tn); String znode=ZKUtil.joinZNode(zkw.tableLockZNode,lockName.getNameAsString()); configuration.set(SweepJob.SWEEP_JOB_ID,"1"); configuration.set(SweepJob.SWEEP_JOB_TABLE_NODE,znode); ServerName serverName=SweepJob.getCurrentServerName(configuration); configuration.set(SweepJob.SWEEP_JOB_SERVERNAME,serverName.toString()); TableLockManager tableLockManager=TableLockManager.createTableLockManager(configuration,zkw,serverName);
TableLock lock=tableLockManager.writeLock(lockName,"Run sweep tool"); lock.acquire(); try { Counter counter=new GenericCounter(); Reducer.Context ctx=mock(Reducer.Context.class); when(ctx.getConfiguration()).thenReturn(configuration); when(ctx.getCounter(Matchers.any(SweepCounter.class))).thenReturn(counter); when(ctx.nextKey()).thenReturn(true).thenReturn(false); when(ctx.getCurrentKey()).thenReturn(new Text(mobFile1)); byte[] refBytes=Bytes.toBytes(mobFile1); long valueLength=refBytes.length; byte[] newValue=Bytes.add(Bytes.toBytes(valueLength),refBytes); KeyValue kv2=new KeyValue(Bytes.toBytes(row),Bytes.toBytes(family),Bytes.toBytes(qf),1,KeyValue.Type.Put,newValue); List list=new ArrayList(); list.add(kv2); when(ctx.getValues()).thenReturn(list); SweepReducer reducer=new SweepReducer(); reducer.run(ctx); } finally { lock.release(); } FileStatus[] filsStatuses2=TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath); String mobFile2=filsStatuses2[0].getPath().getName(); assertEquals(1,filsStatuses2.length); assertEquals(false,mobFile2.equalsIgnoreCase(mobFile1)); String workingPath=configuration.get(SweepJob.WORKING_VISITED_DIR_KEY); FileStatus[] statuses=TEST_UTIL.getTestFileSystem().listStatus(new Path(workingPath)); Set files=new TreeSet(); for ( FileStatus st : statuses) { files.addAll(getKeyFromSequenceFile(TEST_UTIL.getTestFileSystem(),st.getPath(),configuration)); } assertEquals(1,files.size()); assertEquals(true,files.contains(mobFile1)); }

Class: org.apache.hadoop.hbase.monitoring.TestMemoryBoundedLogMessageBuffer

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test
public void testBuffer() {
  // Add far more messages than fit in TEN_KB and check the buffer bounds its
  // heap usage, evicts the oldest entries, and retains the newest.
  MemoryBoundedLogMessageBuffer buffer = new MemoryBoundedLogMessageBuffer(TEN_KB);
  for (int i = 0; i < 1000; i++) {
    buffer.add("hello " + i);
  }
  assertTrue("Usage too big: " + buffer.estimateHeapUsage(),
      buffer.estimateHeapUsage() < TEN_KB);
  assertTrue("Too many retained: " + buffer.getMessages().size(),
      buffer.getMessages().size() < 100);

  // Dump the retained contents and check eviction order: early entries gone,
  // the latest entry still present.
  StringWriter sw = new StringWriter();
  buffer.dumpTo(new PrintWriter(sw));
  String dump = sw.toString();
  String eol = System.getProperty("line.separator");
  assertFalse("The early log messages should be evicted", dump.contains("hello 1" + eol));
  assertTrue("The late log messages should be retained", dump.contains("hello 999" + eol));
}

Class: org.apache.hadoop.hbase.monitoring.TestTaskMonitor

InternalCallVerifier EqualityVerifier 
@Test
public void testTaskLimit() throws Exception {
  // Creating MAX_TASKS + 10 statuses must cap the retained list at MAX_TASKS,
  // discarding the oldest so the first survivor is "task 10".
  TaskMonitor monitor = new TaskMonitor();
  for (int i = 0; i < TaskMonitor.MAX_TASKS + 10; i++) {
    monitor.createStatus("task " + i);
  }
  assertEquals(TaskMonitor.MAX_TASKS, monitor.getTasks().size());
  assertEquals("task 10", monitor.getTasks().get(0).getDescription());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// A MonitoredTask created on a thread that exits without completing it is
// "leaked"; after the creating thread dies and GC runs, the monitor must
// report the task as ABORTED.
// NOTE(review): relies on System.gc() actually collecting the dropped task
// reference — inherently GC-dependent and potentially flaky.
@Test public void testTasksGetAbortedOnLeak() throws InterruptedException { final TaskMonitor tm=new TaskMonitor(); assertTrue("Task monitor should start empty",tm.getTasks().isEmpty()); final AtomicBoolean threadSuccess=new AtomicBoolean(false); Thread t=new Thread(){ @Override public void run(){ MonitoredTask task=tm.createStatus("Test task"); assertEquals(MonitoredTask.State.RUNNING,task.getState()); threadSuccess.set(true); } } ; t.start(); t.join(); assertTrue(threadSuccess.get()); System.gc(); System.gc(); System.gc(); MonitoredTask taskFromTm=tm.getTasks().get(0); assertEquals(MonitoredTask.State.ABORTED,taskFromTm.getState()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testTaskMonitorBasics() {
  TaskMonitor monitor = new TaskMonitor();
  assertTrue("Task monitor should start empty", monitor.getTasks().isEmpty());

  // A new status is visible through the monitor with matching description,
  // no completion timestamp yet, and RUNNING state.
  MonitoredTask task = monitor.createStatus("Test task");
  MonitoredTask viewed = monitor.getTasks().get(0);
  assertEquals(task.getDescription(), viewed.getDescription());
  assertEquals(-1, viewed.getCompletionTimestamp());
  assertEquals(MonitoredTask.State.RUNNING, viewed.getState());

  // Completing keeps the task listed; expiring it removes it.
  task.markComplete("Finished!");
  assertEquals(MonitoredTask.State.COMPLETE, task.getState());
  assertEquals(1, monitor.getTasks().size());
  task.expireNow();
  assertEquals(0, monitor.getTasks().size());
}

Class: org.apache.hadoop.hbase.namespace.TestNamespaceAuditor

InternalCallVerifier BooleanVerifier 
/**
 * Verifies that namespace creation is rejected when a quota value is invalid
 * (non-numeric or negative) and that no namespace directory is left behind.
 *
 * Bug fix: the original reused a single {@code exceptionCaught} flag across
 * all four cases without resetting it, so once the first case threw, the
 * assertTrue for every later case passed vacuously even if createNamespace
 * succeeded. The flag is now reset per case, and the four near-identical
 * blocks are collapsed into a data-driven loop.
 */
@Test
public void testValidQuotas() throws Exception {
  FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
  Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
  // {namespace suffix, max-regions value, max-tables value} — one invalid each.
  String[][] invalidQuotas = {
      {"vq1", "hihdufh", "2"},   // non-numeric max regions
      {"vq2", "-456", "2"},      // negative max regions
      {"vq3", "10", "sciigd"},   // non-numeric max tables
      {"vq4", "10", "-1500"},    // negative max tables
  };
  for (String[] quota : invalidQuotas) {
    NamespaceDescriptor nspDesc = NamespaceDescriptor.create(prefix + quota[0])
        .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, quota[1])
        .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, quota[2]).build();
    boolean exceptionCaught = false;  // reset for each case
    try {
      ADMIN.createNamespace(nspDesc);
    } catch (Exception exp) {
      LOG.warn(exp);
      exceptionCaught = true;
    } finally {
      assertTrue(exceptionCaught);
      assertFalse(fs.exists(FSUtils.getNamespaceDir(rootDir, nspDesc.getName())));
    }
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that restoring a snapshot rolls the namespace's tracked region
 * count back from 5 (after an explicit split) to the snapshot's original 4.
 * Flow: create namespace with a 10-region quota, create a 4-region table,
 * snapshot it, split one region, then disable + restore + re-enable.
 * NOTE(review): uses Thread.sleep(2000) to wait for the split to settle —
 * timing-dependent and potentially flaky on slow hosts.
 */
@Test public void testRestoreSnapshot() throws Exception { String nsp=prefix + "_testRestoreSnapshot"; NamespaceDescriptor nspDesc=NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS,"10").build(); ADMIN.createNamespace(nspDesc); assertNotNull("Namespace descriptor found null.",ADMIN.getNamespaceDescriptor(nsp)); TableName tableName1=TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"); HTableDescriptor tableDescOne=new HTableDescriptor(tableName1); ADMIN.createTable(tableDescOne,Bytes.toBytes("AAA"),Bytes.toBytes("ZZZ"),4); NamespaceTableAndRegionInfo nstate=getNamespaceState(nsp); assertEquals("Intial region count should be 4.",4,nstate.getRegionCount()); String snapshot="snapshot_testRestoreSnapshot"; ADMIN.snapshot(snapshot,tableName1); List regions=ADMIN.getTableRegions(tableName1); Collections.sort(regions); ADMIN.split(tableName1,Bytes.toBytes("JJJ")); Thread.sleep(2000); assertEquals("Total regions count should be 5.",5,nstate.getRegionCount()); ADMIN.disableTable(tableName1); ADMIN.restoreSnapshot(snapshot); assertEquals("Total regions count should be 4 after restore.",4,nstate.getRegionCount()); ADMIN.enableTable(tableName1); ADMIN.deleteSnapshot(snapshot); }

InternalCallVerifier NullVerifier ExceptionVerifier HybridVerifier 
/**
 * With a one-table quota in the namespace, cloning a snapshot into a second
 * table must exceed the quota and raise QuotaExceededException.
 */
@Test(expected = QuotaExceededException.class)
public void testCloneSnapshotQuotaExceed() throws Exception {
  String namespace = prefix + "_testTableQuotaExceedWithCloneSnapshot";
  NamespaceDescriptor descriptor = NamespaceDescriptor.create(namespace)
      .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1")
      .build();
  ADMIN.createNamespace(descriptor);
  assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(namespace));
  TableName original = TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table1");
  TableName clone = TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table2");
  ADMIN.createTable(new HTableDescriptor(original));
  String snapshotName = "snapshot_testTableQuotaExceedWithCloneSnapshot";
  ADMIN.snapshot(snapshotName, original);
  // Expected to throw: the clone would be the namespace's second table.
  ADMIN.cloneSnapshot(snapshotName, clone);
  ADMIN.deleteSnapshot(snapshotName);
}

InternalCallVerifier NullVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/**
 * With a one-table quota, creating a second table in the namespace must fail
 * with QuotaExceededException (declared via the expected attribute).
 *
 * Fix: the listNamespaceDescriptors assertion had its expected/actual
 * arguments swapped, which produces a misleading failure message
 * ("expected &lt;actual&gt; but was &lt;3&gt;"). JUnit's contract is expected first.
 */
@Test(expected = QuotaExceededException.class)
public void testExceedTableQuotaInNamespace() throws Exception {
  String nsp = prefix + "_testExceedTableQuotaInNamespace";
  NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp)
      .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1").build();
  ADMIN.createNamespace(nspDesc);
  assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp));
  // Expected value goes first (was assertEquals(actual, 3) in the original).
  assertEquals(3, ADMIN.listNamespaceDescriptors().length);
  HTableDescriptor tableDescOne =
      new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"));
  HTableDescriptor tableDescTwo =
      new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"));
  ADMIN.createTable(tableDescOne);
  // Expected to throw: this second table exceeds the quota of 1.
  ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
}

InternalCallVerifier EqualityVerifier 
/**
 * Exercises namespace region-quota bookkeeping across merges and splits:
 * creates a 3-region table at the quota limit, merges two regions (count
 * drops to 2), splits back up to 3, then uses CPRegionServerObserver to make
 * a further merge fail and verifies a subsequent split is rejected because
 * the quota is already fully used (count stays at 3).
 * NOTE(review): the namespace suffix "_regiontest" is also used by
 * testRegionOperations in this class — confirm the two tests cannot collide.
 * NOTE(review): the final Thread.sleep(2000) before the last assertion is
 * timing-dependent; the earlier waits use explicit UTIL.waitFor predicates.
 */
@Test public void testRegionMerge() throws Exception { String nsp1=prefix + "_regiontest"; NamespaceDescriptor nspDesc=NamespaceDescriptor.create(nsp1).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS,"3").addConfiguration(TableNamespaceManager.KEY_MAX_TABLES,"2").build(); ADMIN.createNamespace(nspDesc); final TableName tableTwo=TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2"); byte[] columnFamily=Bytes.toBytes("info"); HTableDescriptor tableDescOne=new HTableDescriptor(tableTwo); tableDescOne.addFamily(new HColumnDescriptor(columnFamily)); final int initialRegions=3; ADMIN.createTable(tableDescOne,Bytes.toBytes("1"),Bytes.toBytes("2000"),initialRegions); Connection connection=ConnectionFactory.createConnection(UTIL.getConfiguration()); try (Table table=connection.getTable(tableTwo)){ UTIL.loadNumericRows(table,Bytes.toBytes("info"),1000,1999); } ADMIN.flush(tableTwo); List hris=ADMIN.getTableRegions(tableTwo); Collections.sort(hris); final Set encodedRegionNamesToMerge=Sets.newHashSet(hris.get(0).getEncodedName(),hris.get(1).getEncodedName()); ADMIN.mergeRegions(hris.get(0).getEncodedNameAsBytes(),hris.get(1).getEncodedNameAsBytes(),false); UTIL.waitFor(10000,100,new Waiter.ExplainingPredicate(){ @Override public boolean evaluate() throws Exception { RegionStates regionStates=UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); for ( HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) { if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) { return false; } if (!regionStates.isRegionInState(hri,RegionState.State.OPEN)) { return false; } } return true; } @Override public String explainFailure() throws Exception { RegionStates regionStates=UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); for ( HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) { if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) { return hri + " which is expected to be merged is still online"; } if 
(!regionStates.isRegionInState(hri,RegionState.State.OPEN)) { return hri + " is still in not opened"; } } return "Unknown"; } } ); hris=ADMIN.getTableRegions(tableTwo); assertEquals(initialRegions - 1,hris.size()); Collections.sort(hris); final HRegionInfo hriToSplit=hris.get(1); ADMIN.split(tableTwo,Bytes.toBytes("500")); UTIL.waitFor(10000,100,new Waiter.ExplainingPredicate(){ @Override public boolean evaluate() throws Exception { RegionStates regionStates=UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); for ( HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) { if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) { return false; } if (!regionStates.isRegionInState(hri,RegionState.State.OPEN)) { return false; } } return true; } @Override public String explainFailure() throws Exception { RegionStates regionStates=UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); for ( HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) { if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) { return hriToSplit + " which is expected to be split is still online"; } if (!regionStates.isRegionInState(hri,RegionState.State.OPEN)) { return hri + " is still in not opened"; } } return "Unknown"; } } ); hris=ADMIN.getTableRegions(tableTwo); assertEquals(initialRegions,hris.size()); Collections.sort(hris); MiniHBaseCluster cluster=UTIL.getHBaseCluster(); HRegionServer regionServer=cluster.getRegionServer(0); RegionServerCoprocessorHost cpHost=regionServer.getRegionServerCoprocessorHost(); Coprocessor coprocessor=cpHost.findCoprocessor(CPRegionServerObserver.class.getName()); CPRegionServerObserver regionServerObserver=(CPRegionServerObserver)coprocessor; regionServerObserver.failMerge(true); regionServerObserver.triggered=false; ADMIN.mergeRegions(hris.get(1).getEncodedNameAsBytes(),hris.get(2).getEncodedNameAsBytes(),false); regionServerObserver.waitUtilTriggered(); hris=ADMIN.getTableRegions(tableTwo); 
assertEquals(initialRegions,hris.size()); Collections.sort(hris); HRegionInfo hriToSplit2=hris.get(1); ADMIN.split(tableTwo,TableInputFormatBase.getSplitKey(hriToSplit2.getStartKey(),hriToSplit2.getEndKey(),true)); Thread.sleep(2000); assertEquals(initialRegions,ADMIN.getTableRegions(tableTwo).size()); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Clones a snapshot inside a namespace allowing 2 tables / 20 regions and
 * verifies the source and the clone each have four regions, giving the
 * namespace a total of 2 tables and 8 regions.
 */
@Test
public void testCloneSnapshot() throws Exception {
  String namespace = prefix + "_testCloneSnapshot";
  NamespaceDescriptor descriptor = NamespaceDescriptor.create(namespace)
      .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2")
      .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "20")
      .build();
  ADMIN.createNamespace(descriptor);
  assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(namespace));
  TableName sourceTable = TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table1");
  TableName clonedTable = TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table2");
  HTableDescriptor sourceDesc = new HTableDescriptor(sourceTable);
  ADMIN.createTable(sourceDesc, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
  String snapshotName = "snapshot_testCloneSnapshot";
  ADMIN.snapshot(snapshotName, sourceTable);
  ADMIN.cloneSnapshot(snapshotName, clonedTable);
  int regionCount;
  try (RegionLocator locator = ADMIN.getConnection().getRegionLocator(sourceTable)) {
    regionCount = locator.getStartKeys().length;
  }
  assertEquals(sourceTable.getNameAsString() + " should have four regions.", 4, regionCount);
  try (RegionLocator locator = ADMIN.getConnection().getRegionLocator(clonedTable)) {
    regionCount = locator.getStartKeys().length;
  }
  assertEquals(clonedTable.getNameAsString() + " should have four regions.", 4, regionCount);
  NamespaceTableAndRegionInfo state = getNamespaceState(namespace);
  assertEquals("Total tables count should be 2.", 2, state.getTables().size());
  assertEquals("Total regions count should be.", 8, state.getRegionCount());
  ADMIN.deleteSnapshot(snapshotName);
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies table-level and region-level quota enforcement in one namespace
 * (5 regions, 2 tables max): a 5-region second table is rejected (would be
 * 6 regions total with table1's single region), a 4-region one is accepted,
 * and a third table is rejected by the table quota.
 */
@Test public void testTableOperations() throws Exception { String nsp=prefix + "_np2"; NamespaceDescriptor nspDesc=NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS,"5").addConfiguration(TableNamespaceManager.KEY_MAX_TABLES,"2").build(); ADMIN.createNamespace(nspDesc); assertNotNull("Namespace descriptor found null.",ADMIN.getNamespaceDescriptor(nsp)); assertEquals(ADMIN.listNamespaceDescriptors().length,3); HTableDescriptor tableDescOne=new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1")); HTableDescriptor tableDescTwo=new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2")); HTableDescriptor tableDescThree=new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table3")); ADMIN.createTable(tableDescOne); boolean constraintViolated=false; try { ADMIN.createTable(tableDescTwo,Bytes.toBytes("AAA"),Bytes.toBytes("ZZZ"),5); } catch ( Exception exp) { assertTrue(exp instanceof IOException); constraintViolated=true; } finally { assertTrue("Constraint not violated for table " + tableDescTwo.getTableName(),constraintViolated); } ADMIN.createTable(tableDescTwo,Bytes.toBytes("AAA"),Bytes.toBytes("ZZZ"),4); NamespaceTableAndRegionInfo nspState=getQuotaManager().getState(nsp); assertNotNull(nspState); assertTrue(nspState.getTables().size() == 2); assertTrue(nspState.getRegionCount() == 5); constraintViolated=false; try { ADMIN.createTable(tableDescThree); } catch ( Exception exp) { assertTrue(exp instanceof IOException); constraintViolated=true; } finally { assertTrue("Constraint not violated for table " + tableDescThree.getTableName(),constraintViolated); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * After shrinking the namespace's region quota below the snapshot's region
 * count, a restore must fail. The quota violation surfaces as a
 * RestoreSnapshotException because HBaseAdmin wraps the underlying
 * QuotaExceededException (see the fail() message).
 * NOTE(review): Thread.sleep(2000) after the split is timing-dependent.
 */
@Test public void testRestoreSnapshotQuotaExceed() throws Exception { String nsp=prefix + "_testRestoreSnapshotQuotaExceed"; NamespaceDescriptor nspDesc=NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS,"10").build(); ADMIN.createNamespace(nspDesc); NamespaceDescriptor ndesc=ADMIN.getNamespaceDescriptor(nsp); assertNotNull("Namespace descriptor found null.",ndesc); TableName tableName1=TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"); HTableDescriptor tableDescOne=new HTableDescriptor(tableName1); ADMIN.createTable(tableDescOne,Bytes.toBytes("AAA"),Bytes.toBytes("ZZZ"),4); NamespaceTableAndRegionInfo nstate=getNamespaceState(nsp); assertEquals("Intial region count should be 4.",4,nstate.getRegionCount()); String snapshot="snapshot_testRestoreSnapshotQuotaExceed"; ADMIN.snapshot(snapshot,tableName1); List regions=ADMIN.getTableRegions(tableName1); Collections.sort(regions); ADMIN.split(tableName1,Bytes.toBytes("JJJ")); Thread.sleep(2000); assertEquals("Total regions count should be 5.",5,nstate.getRegionCount()); ndesc.setConfiguration(TableNamespaceManager.KEY_MAX_REGIONS,"2"); ADMIN.modifyNamespace(ndesc); ADMIN.disableTable(tableName1); try { ADMIN.restoreSnapshot(snapshot); fail("Region quota is exceeded so QuotaExceededException should be thrown but HBaseAdmin" + " wraps IOException into RestoreSnapshotException"); } catch ( RestoreSnapshotException ignore) { } ADMIN.enableTable(tableName1); ADMIN.deleteSnapshot(snapshot); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies namespace quota state tracks table deletion: region and table
 * counts drop as tables are removed, and the state entry itself disappears
 * once the namespace is deleted.
 */
@Test public void testDeleteTable() throws Exception { String namespace=prefix + "_dummy"; NamespaceDescriptor nspDesc=NamespaceDescriptor.create(namespace).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS,"100").addConfiguration(TableNamespaceManager.KEY_MAX_TABLES,"3").build(); ADMIN.createNamespace(nspDesc); assertNotNull("Namespace descriptor found null.",ADMIN.getNamespaceDescriptor(namespace)); NamespaceTableAndRegionInfo stateInfo=getNamespaceState(nspDesc.getName()); assertNotNull("Namespace state found null for " + namespace,stateInfo); HTableDescriptor tableDescOne=new HTableDescriptor(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table1")); HTableDescriptor tableDescTwo=new HTableDescriptor(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table2")); ADMIN.createTable(tableDescOne); ADMIN.createTable(tableDescTwo,Bytes.toBytes("AAA"),Bytes.toBytes("ZZZ"),5); stateInfo=getNamespaceState(nspDesc.getName()); assertNotNull("Namespace state found to be null.",stateInfo); assertEquals(2,stateInfo.getTables().size()); assertEquals(5,stateInfo.getRegionCountOfTable(tableDescTwo.getTableName())); assertEquals(6,stateInfo.getRegionCount()); ADMIN.disableTable(tableDescOne.getTableName()); deleteTable(tableDescOne.getTableName()); stateInfo=getNamespaceState(nspDesc.getName()); assertNotNull("Namespace state found to be null.",stateInfo); assertEquals(5,stateInfo.getRegionCount()); assertEquals(1,stateInfo.getTables().size()); ADMIN.disableTable(tableDescTwo.getTableName()); deleteTable(tableDescTwo.getTableName()); ADMIN.deleteNamespace(namespace); stateInfo=getNamespaceState(namespace); assertNull("Namespace state not found to be null.",stateInfo); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Region-quota enforcement across splits and master restart: a 7-region
 * table is rejected by the 2-region quota, a single-region table is
 * accepted, a first split to 2 regions succeeds (quota limit), and after a
 * compaction a further split is blocked — preSplitBeforePONR never fires
 * (latch count stays 1) and the region count stays at 2. Uses the
 * CustomObserver coprocessor's latches to synchronize on split/compact
 * completion instead of sleeping.
 * NOTE(review): the namespace suffix "_regiontest" is also used by
 * testRegionMerge in this class — confirm the two tests cannot collide.
 */
@Test public void testRegionOperations() throws Exception { String nsp1=prefix + "_regiontest"; NamespaceDescriptor nspDesc=NamespaceDescriptor.create(nsp1).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS,"2").addConfiguration(TableNamespaceManager.KEY_MAX_TABLES,"2").build(); ADMIN.createNamespace(nspDesc); boolean constraintViolated=false; final TableName tableOne=TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1"); byte[] columnFamily=Bytes.toBytes("info"); HTableDescriptor tableDescOne=new HTableDescriptor(tableOne); tableDescOne.addFamily(new HColumnDescriptor(columnFamily)); NamespaceTableAndRegionInfo stateInfo; try { ADMIN.createTable(tableDescOne,Bytes.toBytes("1"),Bytes.toBytes("1000"),7); } catch ( Exception exp) { assertTrue(exp instanceof DoNotRetryIOException); LOG.info(exp); constraintViolated=true; } finally { assertTrue(constraintViolated); } assertFalse(ADMIN.tableExists(tableOne)); ADMIN.createTable(tableDescOne); Connection connection=ConnectionFactory.createConnection(UTIL.getConfiguration()); Table htable=connection.getTable(tableOne); UTIL.loadNumericRows(htable,Bytes.toBytes("info"),1,1000); ADMIN.flush(tableOne); stateInfo=getNamespaceState(nsp1); assertEquals(1,stateInfo.getTables().size()); assertEquals(1,stateInfo.getRegionCount()); restartMaster(); HRegion actualRegion=UTIL.getHBaseCluster().getRegions(tableOne).get(0); CustomObserver observer=(CustomObserver)actualRegion.getCoprocessorHost().findCoprocessor(CustomObserver.class.getName()); assertNotNull(observer); ADMIN.split(tableOne,Bytes.toBytes("500")); observer.postSplit.await(); assertEquals(2,ADMIN.getTableRegions(tableOne).size()); actualRegion=UTIL.getHBaseCluster().getRegions(tableOne).get(0); observer=(CustomObserver)actualRegion.getCoprocessorHost().findCoprocessor(CustomObserver.class.getName()); assertNotNull(observer); ADMIN.compact(tableOne); observer.postCompact.await(); 
ADMIN.split(tableOne,getSplitKey(actualRegion.getRegionInfo().getStartKey(),actualRegion.getRegionInfo().getEndKey())); observer.postSplit.await(); List hris=ADMIN.getTableRegions(tableOne); assertEquals(2,hris.size()); assertTrue("split completed",observer.preSplitBeforePONR.getCount() == 1); htable.close(); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A first table creation is forced to fail via MasterSyncObserver; verifies
 * the namespace quota bookkeeping rolls back so the same table name can then
 * be created successfully.
 *
 * Fixes: (1) in the success-path catch block the original called fail()
 * BEFORE LOG.error(e) — fail() throws, so the log statement was unreachable;
 * the order is now log-then-fail. (2) both fail() messages were missing the
 * space before "creation".
 */
@Test
public void testRecreateTableWithSameNameAfterFirstTimeFailure() throws Exception {
  String nsp1 = prefix + "_testRecreateTable";
  NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1)
      .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "20")
      .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1").build();
  ADMIN.createNamespace(nspDesc);
  final TableName tableOne = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1");
  byte[] columnFamily = Bytes.toBytes("info");
  HTableDescriptor tableDescOne = new HTableDescriptor(tableOne);
  tableDescOne.addFamily(new HColumnDescriptor(columnFamily));
  MasterSyncObserver.throwExceptionInPreCreateTableHandler = true;
  try {
    try {
      ADMIN.createTable(tableDescOne);
      fail("Table " + tableOne.toString() + " creation should fail.");
    } catch (Exception exp) {
      LOG.error(exp);
    }
    assertFalse(ADMIN.tableExists(tableOne));
    NamespaceTableAndRegionInfo nstate = getNamespaceState(nsp1);
    assertEquals("First table creation failed in namespace so number of tables in namespace "
        + "should be 0.", 0, nstate.getTables().size());
    MasterSyncObserver.throwExceptionInPreCreateTableHandler = false;
    try {
      ADMIN.createTable(tableDescOne);
    } catch (Exception e) {
      // Log before failing: fail() throws, so anything after it never runs.
      LOG.error(e);
      fail("Table " + tableOne.toString() + " creation should succeed.");
    }
    assertTrue(ADMIN.tableExists(tableOne));
    nstate = getNamespaceState(nsp1);
    assertEquals("First table was created successfully so table size in namespace should "
        + "be one now.", 1, nstate.getTables().size());
  } finally {
    MasterSyncObserver.throwExceptionInPreCreateTableHandler = false;
    if (ADMIN.tableExists(tableOne)) {
      ADMIN.disableTable(tableOne);
      deleteTable(tableOne);
    }
    ADMIN.deleteNamespace(nsp1);
  }
}

Class: org.apache.hadoop.hbase.nio.TestMultiByteBuff

InternalCallVerifier EqualityVerifier 
/**
 * Writes values straddling the boundary between two backing buffers, then
 * verifies skip() advances the read position across that boundary.
 */
@Test
public void testSkipNBytes() {
  ByteBuffer first = ByteBuffer.allocate(15);
  ByteBuffer second = ByteBuffer.allocate(15);
  first.putInt(4);
  long l1 = 45L;
  long l2 = 100L;
  long l3 = 12345L;
  first.putLong(l1);
  first.putShort((short) 2);
  byte[] raw = Bytes.toBytes(l2);
  first.put(raw, 0, 1);
  second.put(raw, 1, 7);
  second.putLong(l3);
  MultiByteBuff buff = new MultiByteBuff(first, second);
  assertEquals(4, buff.getInt());
  assertEquals(l1, buff.getLong());
  // Skip the short (2 bytes) plus l2 (8 bytes, split across the buffers).
  buff.skip(10);
  assertEquals(l3, buff.getLong());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Writes an int spanning the boundary between two wrapped buffers and checks
 * that the absolute getInt(pos) matches the same bytes read one by one.
 *
 * Fix: the final assertion had expected/actual swapped (assertEquals(res,
 * expected)); JUnit's contract is expected first, otherwise a failure
 * message is misleading.
 */
@Test
public void testGetWithPosOnMultiBuffers() throws IOException {
  byte[] b = new byte[4];
  byte[] b1 = new byte[4];
  ByteBuffer bb1 = ByteBuffer.wrap(b);
  ByteBuffer bb2 = ByteBuffer.wrap(b1);
  MultiByteBuff mbb1 = new MultiByteBuff(bb1, bb2);
  mbb1.position(2);
  mbb1.putInt(4);  // spans bytes 2-5: two in bb1, two in bb2
  int res = mbb1.getInt(2);
  byte[] bres = new byte[4];
  bres[0] = mbb1.get(2);
  bres[1] = mbb1.get(3);
  bres[2] = mbb1.get(4);
  bres[3] = mbb1.get(5);
  int expected = Bytes.toInt(bres);
  assertEquals(expected, res);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises asSubByteBuffer: when the requested range lies inside one
 * backing buffer the backing buffer itself is returned; when it straddles
 * the boundary, a fresh buffer with the stitched bytes is returned.
 */
@Test
public void testSubBuffer() {
  ByteBuffer first = ByteBuffer.allocateDirect(10);
  ByteBuffer second = ByteBuffer.allocateDirect(10);
  MultiByteBuff buff = new MultiByteBuff(first, second);
  long l1 = 1234L;
  long l2 = 100L;
  buff.putLong(l1);
  buff.putLong(l2);
  buff.rewind();
  ByteBuffer sub = buff.asSubByteBuffer(Bytes.SIZEOF_LONG);
  // l1 sits entirely in the first buffer, so no copy is made.
  assertTrue(first == sub);
  assertEquals(l1, ByteBufferUtils.toLong(sub, sub.position()));
  buff.skip(Bytes.SIZEOF_LONG);
  sub = buff.asSubByteBuffer(Bytes.SIZEOF_LONG);
  // l2 straddles the boundary, so a new buffer must be handed back.
  assertFalse(first == sub);
  assertFalse(second == sub);
  assertEquals(l2, ByteBufferUtils.toLong(sub, sub.position()));
  buff.rewind();
  ObjectIntPair pair = new ObjectIntPair();
  buff.asSubByteBuffer(8, Bytes.SIZEOF_LONG, pair);
  assertFalse(first == pair.getFirst());
  assertFalse(second == pair.getFirst());
  assertEquals(0, pair.getSecond());
  assertEquals(l2, ByteBufferUtils.toLong(sub, pair.getSecond()));
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * hasArray()/array()/arrayOffset() must work for a heap-backed single
 * buffer and throw UnsupportedOperationException for both multi-buffer
 * and direct-buffer wrappers.
 */
@Test
public void testArrayBasedMethods() {
  byte[] backing = new byte[15];
  ByteBuffer sliced = ByteBuffer.wrap(backing, 1, 10).slice();
  ByteBuffer heap = ByteBuffer.allocate(15);
  ByteBuff buff = new MultiByteBuff(sliced, heap);
  assertFalse(buff.hasArray());
  try {
    buff.array();
    fail();
  } catch (UnsupportedOperationException e) {
  }
  try {
    buff.arrayOffset();
    fail();
  } catch (UnsupportedOperationException e) {
  }
  // A heap-backed single buffer exposes its array (offset 1 from the slice).
  buff = new SingleByteBuff(sliced);
  assertTrue(buff.hasArray());
  assertEquals(1, buff.arrayOffset());
  assertEquals(backing, buff.array());
  // Direct buffers have no accessible array.
  buff = new SingleByteBuff(ByteBuffer.allocateDirect(10));
  assertFalse(buff.hasArray());
  try {
    buff.array();
    fail();
  } catch (UnsupportedOperationException e) {
  }
  try {
    buff.arrayOffset();
    fail();
  } catch (UnsupportedOperationException e) {
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * getIntAfterPosition and absolute get() must read values written across
 * the boundary of two wrapped heap buffers.
 */
@Test
public void testGetIntStrictlyForwardWithPosOnMultiBuffers() throws IOException {
  byte[] small = new byte[4];
  byte[] large = new byte[8];
  ByteBuffer firstBuf = ByteBuffer.wrap(small);
  ByteBuffer secondBuf = ByteBuffer.wrap(large);
  MultiByteBuff buff = new MultiByteBuff(firstBuf, secondBuf);
  buff.position(2);
  buff.putInt(4);
  buff.position(7);
  buff.put((byte) 2);
  buff.putInt(3);
  buff.rewind();
  buff.getIntAfterPosition(4);
  byte byteVal = buff.get(7);
  assertEquals((byte) 2, byteVal);
  buff.position(7);
  int intVal = buff.getIntAfterPosition(1);
  assertEquals(3, intVal);
}

InternalCallVerifier EqualityVerifier 
/**
 * slice() must produce a zero-based view of the region between position and
 * limit, while duplicate() preserves the original position and limit.
 */
@Test
public void testSliceDuplicateMethods() throws Exception {
  ByteBuffer first = ByteBuffer.allocateDirect(10);
  ByteBuffer second = ByteBuffer.allocateDirect(15);
  MultiByteBuff buff = new MultiByteBuff(first, second);
  long l1 = 1234L;
  long l2 = 100L;
  buff.put((byte) 2);
  buff.putLong(l1);
  buff.putLong(l2);
  buff.putInt(45);
  // Window over the two longs only.
  buff.position(1);
  buff.limit(buff.position() + (2 * Bytes.SIZEOF_LONG));
  MultiByteBuff sliced = buff.slice();
  assertEquals(0, sliced.position());
  assertEquals((2 * Bytes.SIZEOF_LONG), sliced.limit());
  assertEquals(l1, sliced.getLong());
  assertEquals(l2, sliced.getLong());
  MultiByteBuff dup = buff.duplicate();
  assertEquals(1, dup.position());
  assertEquals(dup.position() + (2 * Bytes.SIZEOF_LONG), dup.limit());
  assertEquals(l1, dup.getLong());
  assertEquals(l2, dup.getLong());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Absolute get(index, dst, off, len) copies bytes without moving the read
 * position, including reads that span the two backing buffers.
 * (Method name keeps the historical "Positonal" typo — renaming would
 * change the test's public identity.)
 */
@Test
public void testPositonalCopyToByteArray() throws Exception {
  byte[] small = new byte[4];
  byte[] large = new byte[8];
  MultiByteBuff buff = new MultiByteBuff(ByteBuffer.wrap(small), ByteBuffer.wrap(large));
  buff.position(2);
  buff.putInt(4);
  buff.position(7);
  buff.put((byte) 2);
  buff.putInt(3);
  byte[] dst = new byte[4];
  buff.get(2, dst, 0, 4);
  assertEquals(4, Bytes.toInt(dst));
  assertEquals(12, buff.position());
  buff.position(1);
  dst = new byte[4];
  buff.get(8, dst, 0, 4);
  assertEquals(3, Bytes.toInt(dst));
  assertEquals(1, buff.position());
  buff.position(12);
  dst = new byte[1];
  buff.get(7, dst, 0, 1);
  assertEquals(2, dst[0]);
  assertEquals(12, buff.position());
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end read/write coverage for MultiByteBuff: absolute and relative
 * gets/puts of byte/short/int/long, including values spanning the buffer
 * boundary, plus BufferOverflowException when a write exceeds the limit.
 *
 * Fix: the fail() message began with a stray apostrophe
 * ("'Should have thrown ...").
 */
@Test
public void testWritesAndReads() {
  ByteBuffer bb1 = ByteBuffer.allocate(15);
  ByteBuffer bb2 = ByteBuffer.allocate(15);
  int i1 = 4;
  bb1.putInt(i1);
  long l1 = 45L, l2 = 100L, l3 = 12345L;
  bb1.putLong(l1);
  short s1 = 2;
  bb1.putShort(s1);
  byte[] b = Bytes.toBytes(l2);
  bb1.put(b, 0, 1);
  bb2.put(b, 1, 7);
  bb2.putLong(l3);
  MultiByteBuff mbb = new MultiByteBuff(bb1, bb2);
  // Absolute reads (l2 spans the boundary at offset 14).
  assertEquals(l1, mbb.getLong(4));
  assertEquals(l2, mbb.getLong(14));
  assertEquals(l3, mbb.getLong(22));
  assertEquals(i1, mbb.getInt(0));
  assertEquals(s1, mbb.getShort(12));
  // Relative reads in write order.
  assertEquals(i1, mbb.getInt());
  assertEquals(l1, mbb.getLong());
  assertEquals(s1, mbb.getShort());
  assertEquals(l2, mbb.getLong());
  assertEquals(l3, mbb.getLong());
  // Fresh buffers for the write-side checks.
  bb1 = ByteBuffer.allocate(15);
  bb2 = ByteBuffer.allocate(15);
  mbb = new MultiByteBuff(bb1, bb2);
  byte b1 = 5, b2 = 31;
  mbb.put(b1);
  mbb.putLong(l1);
  mbb.putInt(i1);
  mbb.putLong(l2);
  mbb.put(b2);
  // Move within 8 bytes of the limit: an 8-byte write must overflow.
  mbb.position(mbb.position() + 2);
  try {
    mbb.putLong(l3);
    fail("Should have thrown BufferOverflowException");
  } catch (BufferOverflowException e) {
  }
  mbb.position(mbb.position() - 2);
  mbb.putLong(l3);
  mbb.rewind();
  assertEquals(b1, mbb.get());
  assertEquals(l1, mbb.getLong());
  assertEquals(i1, mbb.getInt());
  assertEquals(l2, mbb.getLong());
  assertEquals(b2, mbb.get());
  assertEquals(l3, mbb.getLong());
  mbb.put(21, b1);
  mbb.position(21);
  assertEquals(b1, mbb.get());
  mbb.put(b);
  assertEquals(l2, mbb.getLong(22));
}

InternalCallVerifier EqualityVerifier 
/**
 * mark()/reset() must restore the read position after both relative and
 * absolute reads, including marks set across the buffer boundary.
 */
@Test
public void testMarkAndResetWithMBB() {
  ByteBuffer first = ByteBuffer.allocateDirect(15);
  ByteBuffer second = ByteBuffer.allocateDirect(15);
  first.putInt(4);
  long l1 = 45L;
  long l2 = 100L;
  long l3 = 12345L;
  first.putLong(l1);
  first.putShort((short) 2);
  byte[] raw = Bytes.toBytes(l2);
  first.put(raw, 0, 1);
  second.put(raw, 1, 7);
  second.putLong(l3);
  ByteBuff buff = new MultiByteBuff(first, second);
  assertEquals(4, buff.getInt());
  assertEquals(l1, buff.getLong());
  buff.mark();
  assertEquals((short) 2, buff.getShort());
  buff.reset();
  assertEquals((short) 2, buff.getShort());
  buff.mark();
  assertEquals(l2, buff.getLong());
  buff.reset();
  assertEquals(l2, buff.getLong());
  buff.mark();
  assertEquals(l3, buff.getLong());
  buff.reset();
  assertEquals(l3, buff.getLong());
  // Absolute reads must not disturb the mark.
  buff.mark();
  assertEquals(l2, buff.getLong(14));
  buff.reset();
  assertEquals(l3, buff.getLong(22));
  buff.reset();
  assertEquals(l2, buff.getLong(14));
  buff.mark();
  assertEquals(l3, buff.getLong(22));
  buff.reset();
}

InternalCallVerifier EqualityVerifier 
/**
 * moveBack() must rewind the read position across the buffer boundary so a
 * previously-read long can be re-read.
 */
@Test
public void testMoveBack() {
  ByteBuffer first = ByteBuffer.allocate(15);
  ByteBuffer second = ByteBuffer.allocate(15);
  first.putInt(4);
  long l1 = 45L;
  long l2 = 100L;
  long l3 = 12345L;
  first.putLong(l1);
  first.putShort((short) 2);
  byte[] raw = Bytes.toBytes(l2);
  first.put(raw, 0, 1);
  second.put(raw, 1, 7);
  second.putLong(l3);
  MultiByteBuff buff = new MultiByteBuff(first, second);
  assertEquals(4, buff.getInt());
  assertEquals(l1, buff.getLong());
  buff.skip(10);
  // Net movement: +10 then -18 lands back at the start of l1.
  buff.moveBack(4);
  buff.moveBack(6);
  buff.moveBack(8);
  assertEquals(l1, buff.getLong());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * toBytes(offset, length) must materialize ranges lying wholly inside
 * either backing buffer as well as ranges spanning the boundary.
 */
@Test
public void testToBytes() throws Exception {
  byte[] firstArr = new byte[4];
  byte[] secondArr = new byte[8];
  for (int i = 0; i < firstArr.length; i++) {
    firstArr[i] = (byte) i;
  }
  for (int i = 0; i < secondArr.length; i++) {
    secondArr[i] = (byte) (secondArr.length + i);
  }
  MultiByteBuff buff = new MultiByteBuff(ByteBuffer.wrap(firstArr), ByteBuffer.wrap(secondArr));
  // Entirely inside the second buffer.
  byte[] actual = buff.toBytes(6, 4);
  assertTrue(Bytes.equals(actual, 0, actual.length, secondArr, 2, 4));
  actual = buff.toBytes(5, 7);
  assertTrue(Bytes.equals(actual, 0, actual.length, secondArr, 1, 7));
  // Spanning the boundary between the buffers.
  actual = buff.toBytes(2, 7);
  byte[] expected = new byte[7];
  System.arraycopy(firstArr, 2, expected, 0, 2);
  System.arraycopy(secondArr, 0, expected, 2, 5);
  assertTrue(Bytes.equals(actual, expected));
  // Entirely inside the first buffer.
  actual = buff.toBytes(1, 3);
  assertTrue(Bytes.equals(actual, 0, actual.length, firstArr, 1, 3));
}

Class: org.apache.hadoop.hbase.procedure.TestProcedureCoordinator

InternalCallVerifier NullVerifier 
/**
 * Currently we can only handle one procedure at a time. This makes sure we
 * handle that and reject submitting more: with a single-thread pool, the
 * second startProcedure call must return null while the first procedure is
 * still occupying the pool. The coordinator is stubbed (via spy/when) to
 * hand out the two spied Procedure instances in order.
 */
@Test public void testThreadPoolSize() throws Exception { ProcedureCoordinator coordinator=buildNewCoordinator(); Procedure proc=new Procedure(coordinator,monitor,WAKE_FREQUENCY,TIMEOUT,procName,procData,expected); Procedure procSpy=spy(proc); Procedure proc2=new Procedure(coordinator,monitor,WAKE_FREQUENCY,TIMEOUT,procName + "2",procData,expected); Procedure procSpy2=spy(proc2); when(coordinator.createProcedure(any(ForeignExceptionDispatcher.class),eq(procName),eq(procData),anyListOf(String.class))).thenReturn(procSpy,procSpy2); coordinator.startProcedure(procSpy.getErrorMonitor(),procName,procData,expected); assertNull("Coordinator successfully ran two tasks at once with a single thread pool.",coordinator.startProcedure(proc2.getErrorMonitor(),"another op",procData,expected)); }

Class: org.apache.hadoop.hbase.procedure.TestProcedureManager

InternalCallVerifier EqualityVerifier 
/**
 * Runs the simple master procedure end-to-end through the Admin API and
 * checks the returned payload matches the procedure's canned data bytes.
 */
@Test
public void testSimpleProcedureManager() throws IOException {
  Admin admin = util.getHBaseAdmin();
  // NOTE(review): the HashMap's type arguments appear stripped by extraction;
  // raw type kept to preserve behavior.
  byte[] returned = admin.execProcedureWithRet(
      SimpleMasterProcedureManager.SIMPLE_SIGNATURE, "mytest", new HashMap());
  assertArrayEquals("Incorrect return data from execProcedure",
      SimpleMasterProcedureManager.SIMPLE_DATA.getBytes(), returned);
}

Class: org.apache.hadoop.hbase.procedure.TestZKProcedure

APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
// NOTE(review): generic type parameters look stripped by extraction throughout
// ("List>", "new Pair(...)", raw "Answer"); left byte-identical rather than guessed.
// Flow: three ZK cohort members are wired to a spied coordinator; the member at
// memberErrorIndex injects a timeout ForeignException during acquireBarrier and
// waits (via coordinatorReceivedErrorLatch) until the coordinator task observes
// the error; the tail assertions verify abort propagation to coordinator and cohort.
/** * Test a distributed commit with multiple cohort members, where one of the cohort members has a * timeout exception during the prepare stage. */ @Test public void testMultiCohortWithMemberTimeoutDuringPrepare() throws Exception { String opDescription="error injection coordination"; String[] cohortMembers=new String[]{"one","two","three"}; List expected=Lists.newArrayList(cohortMembers); final int memberErrorIndex=2; final CountDownLatch coordinatorReceivedErrorLatch=new CountDownLatch(1); ZooKeeperWatcher coordinatorWatcher=newZooKeeperWatcher(); ZKProcedureCoordinatorRpcs coordinatorController=new ZKProcedureCoordinatorRpcs(coordinatorWatcher,opDescription,COORDINATOR_NODE_NAME); ThreadPoolExecutor pool=ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME,POOL_SIZE,KEEP_ALIVE); ProcedureCoordinator coordinator=spy(new ProcedureCoordinator(coordinatorController,pool)); SubprocedureFactory subprocFactory=Mockito.mock(SubprocedureFactory.class); List> members=new ArrayList>(expected.size()); for ( String member : expected) { ZooKeeperWatcher watcher=newZooKeeperWatcher(); ZKProcedureMemberRpcs controller=new ZKProcedureMemberRpcs(watcher,opDescription); ThreadPoolExecutor pool2=ProcedureMember.defaultPool(member,1,KEEP_ALIVE); ProcedureMember mem=new ProcedureMember(controller,pool2,subprocFactory); members.add(new Pair(mem,controller)); controller.start(member,mem); } final List cohortTasks=new ArrayList(); final int[] elem=new int[1]; for (int i=0; i < members.size(); i++) { ForeignExceptionDispatcher cohortMonitor=new ForeignExceptionDispatcher(); final ProcedureMember comms=members.get(i).getFirst(); Subprocedure commit=Mockito.spy(new SubprocedureImpl(comms,opName,cohortMonitor,WAKE_FREQUENCY,TIMEOUT)); Mockito.doAnswer(new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { int index=elem[0]; if (index == memberErrorIndex) { LOG.debug("Sending error to coordinator"); ForeignException remoteCause=new 
ForeignException("TIMER",new TimeoutException("subprocTimeout",1,2,0)); Subprocedure r=((Subprocedure)invocation.getMock()); LOG.error("Remote commit failure, not propagating error:" + remoteCause); comms.receiveAbortProcedure(r.getName(),remoteCause); assertEquals(r.isComplete(),true); try { Procedure.waitForLatch(coordinatorReceivedErrorLatch,new ForeignExceptionDispatcher(),WAKE_FREQUENCY,"coordinator received error"); } catch ( InterruptedException e) { LOG.debug("Wait for latch interrupted, done:" + (coordinatorReceivedErrorLatch.getCount() == 0)); Thread.currentThread().interrupt(); } } elem[0]=++index; return null; } } ).when(commit).acquireBarrier(); cohortTasks.add(commit); } final AtomicInteger taskIndex=new AtomicInteger(); Mockito.when(subprocFactory.buildSubprocedure(Mockito.eq(opName),(byte[])Mockito.argThat(new ArrayEquals(data)))).thenAnswer(new Answer(){ @Override public Subprocedure answer( InvocationOnMock invocation) throws Throwable { int index=taskIndex.getAndIncrement(); Subprocedure commit=cohortTasks.get(index); return commit; } } ); ForeignExceptionDispatcher coordinatorTaskErrorMonitor=Mockito.spy(new ForeignExceptionDispatcher()); Procedure coordinatorTask=Mockito.spy(new Procedure(coordinator,coordinatorTaskErrorMonitor,WAKE_FREQUENCY,TIMEOUT,opName,data,expected)); when(coordinator.createProcedure(any(ForeignExceptionDispatcher.class),eq(opName),eq(data),anyListOf(String.class))).thenReturn(coordinatorTask); Mockito.doAnswer(new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { invocation.callRealMethod(); coordinatorReceivedErrorLatch.countDown(); return null; } } ).when(coordinatorTask).receive(Mockito.any(ForeignException.class)); Procedure task=coordinator.startProcedure(coordinatorTaskErrorMonitor,opName,data,expected); assertEquals("Didn't mock coordinator task",coordinatorTask,task); try { task.waitForCompleted(); } catch ( ForeignException fe) { } 
waitAndVerifyProc(coordinatorTask,once,never(),once,atMost(1),true); verifyCohortSuccessful(expected,subprocFactory,cohortTasks,once,never(),once,once,true); closeAll(coordinator,coordinatorController,members); }

Class: org.apache.hadoop.hbase.procedure.TestZKProcedureControllers

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// NOTE(review): order-critical ZK choreography — the member mock acks "acquired"
// when the acquire-barrier znode is created and "completed" when the reached-barrier
// znode appears; each phase is gated by its CountDownLatch before the next znode is
// written. 'memberData' is a field defined elsewhere in the test class — TODO confirm.
/** * Smaller test to just test the actuation on the cohort member * @throws Exception on failure */ @Test(timeout=60000) public void testSimpleZKCohortMemberController() throws Exception { ZooKeeperWatcher watcher=UTIL.getZooKeeperWatcher(); final String operationName="instanceTest"; final Subprocedure sub=Mockito.mock(Subprocedure.class); Mockito.when(sub.getName()).thenReturn(operationName); final byte[] data=new byte[]{1,2,3}; final CountDownLatch prepared=new CountDownLatch(1); final CountDownLatch committed=new CountDownLatch(1); final ForeignExceptionDispatcher monitor=spy(new ForeignExceptionDispatcher()); final ZKProcedureMemberRpcs controller=new ZKProcedureMemberRpcs(watcher,"testSimple"); final ProcedureMember member=Mockito.mock(ProcedureMember.class); Mockito.doReturn(sub).when(member).createSubprocedure(operationName,data); Mockito.doAnswer(new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { controller.sendMemberAcquired(sub); prepared.countDown(); return null; } } ).when(member).submitSubprocedure(sub); Mockito.doAnswer(new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { controller.sendMemberCompleted(sub,memberData); committed.countDown(); return null; } } ).when(member).receivedReachedGlobalBarrier(operationName); controller.start(COHORT_NODE_NAME,member); String prepare=ZKProcedureUtil.getAcquireBarrierNode(controller.getZkController(),operationName); ZKUtil.createSetData(watcher,prepare,ProtobufUtil.prependPBMagic(data)); prepared.await(); String commit=ZKProcedureUtil.getReachedBarrierNode(controller.getZkController(),operationName); LOG.debug("Found prepared, posting commit node:" + commit); ZKUtil.createAndFailSilent(watcher,commit); LOG.debug("Commit node:" + commit + ", exists:"+ ZKUtil.checkExists(watcher,commit)); committed.await(); verify(monitor,never()).receive(Mockito.any(ForeignException.class)); 
ZKUtil.deleteNodeRecursively(watcher,controller.getZkController().getBaseZnode()); assertEquals("Didn't delete prepare node",-1,ZKUtil.checkExists(watcher,prepare)); assertEquals("Didn't delete commit node",-1,ZKUtil.checkExists(watcher,commit)); }

Class: org.apache.hadoop.hbase.procedure2.TestProcedureExecution

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Submits a procedure tree where one child carries a NULL_PROC sibling and
 * asserts the run fails with IllegalArgumentException, recording exactly the
 * execute/rollback events for the procedures that did run.
 */
@Test(timeout=30000)
public void testBadSubprocList() {
  List trace = new ArrayList();
  Procedure subProc2 = new TestSequentialProcedure("subProc2", trace);
  Procedure subProc1 = new TestSequentialProcedure("subProc1", trace, subProc2, NULL_PROC);
  Procedure rootProc = new TestSequentialProcedure("rootProc", trace, subProc1);
  long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc);
  LOG.info(trace);
  ProcedureInfo result = procExecutor.getResult(rootId);
  assertTrue(trace.toString(), result.isFailed());
  ProcedureTestingUtility.assertIsIllegalArgumentException(result);
  // Only rootProc and subProc1 executed, and both rolled back — 4 events total.
  assertEquals(trace.toString(), 4, trace.size());
  assertEquals("rootProc-execute", trace.get(0));
  assertEquals("subProc1-execute", trace.get(1));
  assertEquals("subProc1-rollback", trace.get(2));
  assertEquals("rootProc-rollback", trace.get(3));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a waiting procedure (with a child) under a 2.5s timeout and asserts
 * the failure is a timeout, with both parent and child executed then rolled
 * back in reverse order.
 */
@Test(timeout=30000)
public void testAbortTimeoutWithChildren() {
  List trace = new ArrayList();
  Procedure waitingProc = new TestWaitingProcedure("wproc", trace, true);
  waitingProc.setTimeout(2500);
  long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, waitingProc);
  LOG.info(trace);
  ProcedureInfo result = procExecutor.getResult(rootId);
  assertTrue(trace.toString(), result.isFailed());
  ProcedureTestingUtility.assertIsTimeoutException(result);
  // Parent and child each execute once and roll back once.
  assertEquals(trace.toString(), 4, trace.size());
  assertEquals("wproc-execute", trace.get(0));
  assertEquals("wproc-child-execute", trace.get(1));
  assertEquals("wproc-child-rollback", trace.get(2));
  assertEquals("wproc-rollback", trace.get(3));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a childless waiting procedure under a 2.5s timeout; verifies the
 * executor actually waited at least that long and the result is a timeout
 * failure with one execute and one rollback event.
 */
@Test(timeout=30000)
public void testAbortTimeout() {
  final int PROC_TIMEOUT_MSEC = 2500;
  List trace = new ArrayList();
  Procedure waitingProc = new TestWaitingProcedure("wproc", trace, false);
  waitingProc.setTimeout(PROC_TIMEOUT_MSEC);
  long startTime = EnvironmentEdgeManager.currentTime();
  long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, waitingProc);
  long execTime = EnvironmentEdgeManager.currentTime() - startTime;
  LOG.info(trace);
  // The run must not complete before the configured timeout elapses.
  assertTrue("we didn't wait enough execTime=" + execTime, execTime >= PROC_TIMEOUT_MSEC);
  ProcedureInfo result = procExecutor.getResult(rootId);
  assertTrue(trace.toString(), result.isFailed());
  ProcedureTestingUtility.assertIsTimeoutException(result);
  assertEquals(trace.toString(), 2, trace.size());
  assertEquals("wproc-execute", trace.get(0));
  assertEquals("wproc-rollback", trace.get(1));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Submits a procedure whose rollback is faulty and verifies the final result
 * is a failure whose root cause is a TestProcedureException.
 */
@Test(timeout=30000)
public void testRollbackRetriableFailure() {
  long procId = ProcedureTestingUtility.submitAndWait(procExecutor, new TestFaultyRollback());
  ProcedureInfo info = procExecutor.getResult(procId);
  assertTrue("expected a failure", info.isFailed());
  LOG.info(info.getExceptionFullMessage());
  Throwable rootCause = ProcedureTestingUtility.getExceptionCause(info);
  assertTrue("expected TestProcedureException, got " + rootCause,
      rootCause instanceof TestProcedureException);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A three-level sequential procedure chain whose deepest child throws; the
 * whole chain must roll back in reverse order and surface the
 * TestProcedureException as the failure cause.
 */
@Test(timeout=30000)
public void testSingleSequentialProcRollback() {
  List trace = new ArrayList();
  Procedure subProc2 =
      new TestSequentialProcedure("subProc2", trace, new TestProcedureException("fail test"));
  Procedure subProc1 = new TestSequentialProcedure("subProc1", trace, subProc2);
  Procedure rootProc = new TestSequentialProcedure("rootProc", trace, subProc1);
  long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc);
  LOG.info(trace);
  ProcedureInfo result = procExecutor.getResult(rootId);
  assertTrue(trace.toString(), result.isFailed());
  LOG.info(result.getExceptionFullMessage());
  Throwable rootCause = ProcedureTestingUtility.getExceptionCause(result);
  assertTrue("expected TestProcedureException, got " + rootCause,
      rootCause instanceof TestProcedureException);
  // All three executed, all three rolled back, deepest-first on rollback.
  assertEquals(trace.toString(), 6, trace.size());
  assertEquals("rootProc-execute", trace.get(0));
  assertEquals("subProc1-execute", trace.get(1));
  assertEquals("subProc2-execute", trace.get(2));
  assertEquals("subProc2-rollback", trace.get(3));
  assertEquals("subProc1-rollback", trace.get(4));
  assertEquals("rootProc-rollback", trace.get(5));
}

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Builds a fresh WAL-backed procedure store and executor under the test data
 * directory before each test. Store is started before the executor so the WAL
 * is ready when the executor begins.
 */
@Before
public void setUp() throws IOException {
  htu = new HBaseCommonTestingUtility();
  testDir = htu.getDataTestDir();
  fs = testDir.getFileSystem(htu.getConfiguration());
  // Guard against accidentally pointing at a filesystem root.
  assertTrue(testDir.depth() > 1);
  logDir = new Path(testDir, "proc-logs");
  procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir);
  procExecutor = new ProcedureExecutor(htu.getConfiguration(), null, procStore);
  procStore.start(PROCEDURE_EXECUTOR_SLOTS);
  procExecutor.start(PROCEDURE_EXECUTOR_SLOTS, true);
}

Class: org.apache.hadoop.hbase.procedure2.TestProcedureRecovery

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Kills/restarts the executor between steps of a multi-step procedure, aborts
 * it mid-run, then keeps restarting until the abort rollback completes;
 * the final result must be an abort exception.
 */
@Test(timeout=30000)
public void testMultiStepRollbackRecovery() throws Exception {
  Procedure proc = new TestMultiStepProcedure();
  long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
  assertFalse(procExecutor.isRunning());
  // Two recovery cycles: each restart replays the WAL, runs one more step,
  // then the kill-injection stops the executor again.
  restart();
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  restart();
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  // Slow the procedure so the abort lands while it is still in flight.
  procSleepInterval = 2500;
  restart();
  assertTrue(procExecutor.abort(procId));
  waitProcedure(procId);
  assertFalse(procExecutor.isRunning());
  // Rollback also proceeds one step per restart under kill-injection.
  restart();
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  restart();
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  restart();
  waitProcedure(procId);
  ProcedureInfo result = procExecutor.getResult(procId);
  ProcedureTestingUtility.assertIsAbortException(result);
}

InternalCallVerifier BooleanVerifier 
/**
 * Drives a multi-step procedure through repeated kill/restart cycles; after
 * the final restart the procedure must have completed successfully.
 */
@Test(timeout=30000)
public void testMultiStepProcRecovery() throws Exception {
  Procedure proc = new TestMultiStepProcedure();
  long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
  assertFalse(procExecutor.isRunning());
  // Each restart replays the WAL and advances one step before being killed.
  restart();
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  restart();
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  // Final cycle: the executor stays up and the procedure finishes.
  restart();
  waitProcedure(procId);
  assertTrue(procExecutor.isRunning());
  ProcedureInfo result = procExecutor.getResult(procId);
  ProcedureTestingUtility.assertProcNotFailed(result);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * State-machine variant of rollback recovery: kill-before-store-update is
 * toggled on, the procedure is aborted mid-run, and restarts continue until
 * the rollback finishes with an abort exception.
 */
@Test(timeout=30000)
public void testStateMachineRollbackRecovery() throws Exception {
  ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, true);
  ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, true);
  Procedure proc = new TestStateMachineProcedure();
  long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  restart();
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  restart();
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  // Slow the procedure so the abort is delivered while it is still running.
  procSleepInterval = 2500;
  restart();
  assertTrue(procExecutor.abort(procId));
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  restart();
  waitProcedure(procId);
  assertFalse(procExecutor.isRunning());
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  restart();
  waitProcedure(procId);
  assertFalse(procExecutor.isRunning());
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  // Last restart: executor stays running and the abort result is final.
  restart();
  waitProcedure(procId);
  assertTrue(procExecutor.isRunning());
  ProcedureInfo result = procExecutor.getResult(procId);
  ProcedureTestingUtility.assertIsAbortException(result);
}

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Recovery-test fixture: fresh store + executor with testing hooks enabled
 * (ProcedureExecutor.Testing allows kill-injection) and the shared sleep
 * interval reset to zero.
 */
@Before
public void setUp() throws IOException {
  htu = new HBaseCommonTestingUtility();
  testDir = htu.getDataTestDir();
  fs = testDir.getFileSystem(htu.getConfiguration());
  // Guard against accidentally pointing at a filesystem root.
  assertTrue(testDir.depth() > 1);
  logDir = new Path(testDir, "proc-logs");
  procEnv = new TestProcEnv();
  procStore = ProcedureTestingUtility.createStore(htu.getConfiguration(), fs, logDir);
  procExecutor = new ProcedureExecutor(htu.getConfiguration(), procEnv, procStore);
  // Enable the kill-injection hooks used by the recovery tests.
  procExecutor.testing = new ProcedureExecutor.Testing();
  procStore.start(PROCEDURE_EXECUTOR_SLOTS);
  procExecutor.start(PROCEDURE_EXECUTOR_SLOTS, true);
  procSleepInterval = 0;
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Kills the executor before the store update of a single-step procedure,
 * restarts, and verifies the procedure completes with result 1; a second
 * restart must return the identical cached result (same lastUpdate).
 */
@Test(timeout=30000)
public void testSingleStepProcRecovery() throws Exception {
  Procedure proc = new TestSingleStepProcedure();
  procExecutor.testing.killBeforeStoreUpdate = true;
  long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
  assertFalse(procExecutor.isRunning());
  procExecutor.testing.killBeforeStoreUpdate = false;
  long restartTs = EnvironmentEdgeManager.currentTime();
  restart();
  waitProcedure(procId);
  ProcedureInfo result = procExecutor.getResult(procId);
  // Completion happened after the restart, not before the kill.
  assertTrue(result.getLastUpdate() > restartTs);
  ProcedureTestingUtility.assertProcNotFailed(result);
  assertEquals(1, Bytes.toInt(result.getResult()));
  long resultTs = result.getLastUpdate();
  // A second restart must not re-run the procedure: same timestamp & result.
  restart();
  result = procExecutor.getResult(procId);
  ProcedureTestingUtility.assertProcNotFailed(result);
  assertEquals(resultTs, result.getLastUpdate());
  assertEquals(1, Bytes.toInt(result.getResult()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Recovers a state-machine procedure across repeated kill/restart cycles;
 * on the final restart it completes successfully with result 15.
 */
@Test(timeout=30000)
public void testStateMachineRecovery() throws Exception {
  ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, true);
  ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, true);
  Procedure proc = new TestStateMachineProcedure();
  long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
  assertFalse(procExecutor.isRunning());
  // Each restart advances one state before the kill-injection fires again.
  restart();
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  restart();
  waitProcedure(procId);
  ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
  assertFalse(procExecutor.isRunning());
  // Final cycle: executor stays up, procedure completes.
  restart();
  waitProcedure(procId);
  assertTrue(procExecutor.isRunning());
  ProcedureInfo result = procExecutor.getResult(procId);
  ProcedureTestingUtility.assertProcNotFailed(result);
  assertEquals(15, Bytes.toInt(result.getResult()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Submitting a second procedure with the same (nonceGroup, nonce) while the
 * first is still live must yield the same procedure id (nonce dedup).
 */
@Test(timeout=30000)
public void testRunningProcWithSameNonce() throws Exception {
  final long nonceGroup = 456;
  final long nonce = 33333;
  Procedure proc = new TestSingleStepProcedure();
  long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc, nonceGroup, nonce);
  // Block the environment so the replayed procedure is still "running" when
  // the duplicate submission arrives.
  CountDownLatch latch = new CountDownLatch(1);
  procEnv.setWaitLatch(latch);
  restart();
  Procedure duplicate = new TestSingleStepProcedure();
  long procId2 = procExecutor.submitProcedure(duplicate, nonceGroup, nonce);
  latch.countDown();
  procEnv.setWaitLatch(null);
  assertTrue(procId == procId2);
}

Class: org.apache.hadoop.hbase.procedure2.TestProcedureReplayOrder

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Replay-order fixture: WAL store with a short sync wait (25ms), store
 * started with NUM_THREADS slots but the executor deliberately limited to a
 * single worker thread.
 */
@Before
public void setUp() throws IOException {
  htu = new HBaseCommonTestingUtility();
  // Short WAL sync wait keeps the replay-order test fast.
  htu.getConfiguration().setInt("hbase.procedure.store.wal.sync.wait.msec", 25);
  testDir = htu.getDataTestDir();
  fs = testDir.getFileSystem(htu.getConfiguration());
  // Guard against accidentally pointing at a filesystem root.
  assertTrue(testDir.depth() > 1);
  logDir = new Path(testDir, "proc-logs");
  procEnv = new TestProcedureEnv();
  procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir);
  procExecutor = new ProcedureExecutor(htu.getConfiguration(), procEnv, procStore);
  procStore.start(NUM_THREADS);
  procExecutor.start(1, true);
}

Class: org.apache.hadoop.hbase.procedure2.TestProcedureToString

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * toString() of a state-machine procedure must include both the base
 * ProcedureState and the machine's next state.
 */
@Test
public void testStateMachineProcedure() {
  SimpleStateMachineProcedure procedure = new SimpleStateMachineProcedure();
  ProcedureState runnable = ProcedureState.RUNNABLE;
  procedure.setState(runnable);
  procedure.setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
  assertTrue(procedure.toString().contains(runnable.toString()));
  assertTrue(procedure.toString().contains(ServerCrashState.SERVER_CRASH_ASSIGN.toString()));
}

InternalCallVerifier BooleanVerifier 
/**
 * Test that I can override the toString for its state value.
 * The doubling subclass must render the state string twice.
 * @throws ProcedureYieldException
 * @throws InterruptedException
 */
@Test
public void testBasicToString() throws ProcedureYieldException, InterruptedException {
  BasicProcedure procedure = new BasicProcedure();
  ProcedureState runnable = ProcedureState.RUNNABLE;
  procedure.setState(runnable);
  assertTrue(procedure.toString().contains(runnable.toString()));
  // Subclass overrides the state rendering: the state appears doubled.
  procedure = new DoublingStateStringBasicProcedure();
  procedure.setState(runnable);
  String doubled = runnable.toString() + runnable.toString();
  assertTrue(procedure.toString().contains(doubled));
}

Class: org.apache.hadoop.hbase.procedure2.TestYieldProcedures

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Yield-test fixture: fresh WAL store and executor (with an inline
 * TestProcEnv) under the test data directory.
 */
@Before
public void setUp() throws IOException {
  htu = new HBaseCommonTestingUtility();
  testDir = htu.getDataTestDir();
  fs = testDir.getFileSystem(htu.getConfiguration());
  // Guard against accidentally pointing at a filesystem root.
  assertTrue(testDir.depth() > 1);
  logDir = new Path(testDir, "proc-logs");
  procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir);
  procExecutor = new ProcedureExecutor(htu.getConfiguration(), new TestProcEnv(), procStore);
  procStore.start(PROCEDURE_EXECUTOR_SLOTS);
  procExecutor.start(PROCEDURE_EXECUTOR_SLOTS, true);
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Three yielding state-machine procedures run concurrently; because each
 * yields after every step, execution interleaves round-robin. The recorded
 * ExecutionInfo timestamps must therefore be strictly consecutive across the
 * forward (execute) phase and then the reverse (rollback) phase.
 */
@Test
public void testYieldEachExecutionStep() throws Exception {
  final int NUM_STATES = 3;
  TestStateMachineProcedure[] procs = new TestStateMachineProcedure[3];
  for (int p = 0; p < procs.length; ++p) {
    procs[p] = new TestStateMachineProcedure(true, false);
    procExecutor.submitProcedure(procs[p]);
  }
  ProcedureTestingUtility.waitNoProcedureRunning(procExecutor);
  long prevTimestamp = 0;
  // Forward phase: step k of every procedure before step k+1 of any.
  for (int execStep = 0; execStep < NUM_STATES; ++execStep) {
    for (int p = 0; p < procs.length; ++p) {
      assertEquals(NUM_STATES * 2, procs[p].getExecutionInfo().size());
      TestStateMachineProcedure.ExecutionInfo info = procs[p].getExecutionInfo().get(execStep);
      LOG.info("i=" + p + " execStep=" + execStep + " timestamp=" + info.getTimestamp());
      assertEquals(false, info.isRollback());
      assertEquals(execStep, info.getStep().ordinal());
      assertEquals(prevTimestamp + 1, info.getTimestamp());
      prevTimestamp++;
    }
  }
  // Rollback phase: recorded after the forward entries, highest step first.
  int infoIndex = NUM_STATES;
  for (int execStep = NUM_STATES - 1; execStep >= 0; --execStep) {
    for (int p = 0; p < procs.length; ++p) {
      assertEquals(NUM_STATES * 2, procs[p].getExecutionInfo().size());
      TestStateMachineProcedure.ExecutionInfo info = procs[p].getExecutionInfo().get(infoIndex);
      LOG.info("i=" + p + " execStep=" + execStep + " timestamp=" + info.getTimestamp());
      assertEquals(true, info.isRollback());
      assertEquals(execStep, info.getStep().ordinal());
      assertEquals(prevTimestamp + 1, info.getTimestamp());
      prevTimestamp++;
    }
    infoIndex++;
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * With interrupt-yield enabled, every state executes twice (the interrupted
 * attempt and the retry) and likewise rolls back twice, giving
 * NUM_STATES * 4 recorded ExecutionInfo entries.
 */
@Test
public void testYieldOnInterrupt() throws Exception {
  final int NUM_STATES = 3;
  int idx = 0;
  TestStateMachineProcedure proc = new TestStateMachineProcedure(true, true);
  ProcedureTestingUtility.submitAndWait(procExecutor, proc);
  assertEquals(NUM_STATES * 4, proc.getExecutionInfo().size());
  // Forward: each state appears twice, in ascending order.
  for (int step = 0; step < NUM_STATES; ++step) {
    TestStateMachineProcedure.ExecutionInfo info = proc.getExecutionInfo().get(idx++);
    assertEquals(false, info.isRollback());
    assertEquals(step, info.getStep().ordinal());
    info = proc.getExecutionInfo().get(idx++);
    assertEquals(false, info.isRollback());
    assertEquals(step, info.getStep().ordinal());
  }
  // Rollback: each state appears twice, in descending order.
  for (int step = NUM_STATES - 1; step >= 0; --step) {
    TestStateMachineProcedure.ExecutionInfo info = proc.getExecutionInfo().get(idx++);
    assertEquals(true, info.isRollback());
    assertEquals(step, info.getStep().ordinal());
    info = proc.getExecutionInfo().get(idx++);
    assertEquals(true, info.isRollback());
    assertEquals(step, info.getStep().ordinal());
  }
}

Class: org.apache.hadoop.hbase.procedure2.store.TestProcedureStoreTracker

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In partial mode the tracker answers MAYBE for ids it has no information
 * about, and only definite YES/NO for ids explicitly marked via setDeleted.
 * Leaving partial mode with everything deleted yields an empty tracker.
 */
@Test
public void testPartialTracker() {
  ProcedureStoreTracker partial = new ProcedureStoreTracker();
  partial.setPartialFlag(true);
  assertTrue(partial.isEmpty());
  // Unknown ids are MAYBE-deleted while partial.
  assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, partial.isDeleted(1));
  assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, partial.isDeleted(579));
  partial.setDeleted(1, true);
  partial.dump();
  assertEquals(ProcedureStoreTracker.DeleteState.YES, partial.isDeleted(1));
  assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, partial.isDeleted(2));
  assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, partial.isDeleted(579));
  // Marking 579 live pins only that id; neighbors stay MAYBE.
  partial.setDeleted(579, false);
  assertEquals(ProcedureStoreTracker.DeleteState.YES, partial.isDeleted(1));
  assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, partial.isDeleted(2));
  assertEquals(ProcedureStoreTracker.DeleteState.NO, partial.isDeleted(579));
  assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, partial.isDeleted(577));
  assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, partial.isDeleted(580));
  partial.setDeleted(579, true);
  partial.setPartialFlag(false);
  assertTrue(partial.isEmpty());
}

IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * isTracking(a, b) must report true for inserted id pairs and for ranges
 * overlapping them, and false for both once the ids are deleted.
 */
@Test
public void testIsTracking() {
  long[][] insertedPairs = new long[][]{{4, 7}, {1024, 1027}, {8192, 8194}};
  long[][] probePairs = new long[][]{{2, 8}, {1023, 1025}, {8193, 8191}};
  ProcedureStoreTracker tracker = new ProcedureStoreTracker();
  for (int idx = 0; idx < insertedPairs.length; ++idx) {
    long[] pair = insertedPairs[idx];
    tracker.insert(pair[0]);
    tracker.insert(pair[1]);
  }
  for (int idx = 0; idx < insertedPairs.length; ++idx) {
    long[] probe = probePairs[idx];
    long[] pair = insertedPairs[idx];
    assertTrue(tracker.isTracking(pair[0], pair[1]));
    assertTrue(tracker.isTracking(probe[0], probe[1]));
    tracker.delete(pair[0]);
    tracker.delete(pair[1]);
    // After deletion neither the exact pair nor the overlapping probe tracks.
    assertFalse(tracker.isTracking(pair[0], pair[1]));
    assertFalse(tracker.isTracking(probe[0], probe[1]));
  }
  assertTrue(tracker.isEmpty());
}

IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * After inserting a few scattered ids, every other id in the scanned range
 * must read as deleted (YES) while the inserted ones read as live (NO).
 */
@Test
public void testDelete() {
  final ProcedureStoreTracker tracker = new ProcedureStoreTracker();
  long[] insertedIds = new long[]{65, 1, 193};
  for (int k = 0; k < insertedIds.length; ++k) {
    tracker.insert(insertedIds[k]);
    tracker.dump();
  }
  // Scan four bitmap words' worth of ids (ids span multiple 64-bit words).
  for (int id = 0; id < (64 * 4); ++id) {
    boolean isInserted = false;
    for (int k = 0; k < insertedIds.length; ++k) {
      if (insertedIds[k] == id) {
        isInserted = true;
        break;
      }
    }
    if (isInserted) {
      assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(id));
    } else {
      assertEquals("procId=" + id, ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(id));
    }
  }
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Sequentially inserts then deletes ids MIN_PROC..MAX_PROC-1, checking after
 * every mutation that the tracker's delete-state boundary sits exactly at the
 * last mutated id; the tracker must be empty at the end.
 */
@Test
public void testSeqInsertAndDelete() {
  ProcedureStoreTracker tracker = new ProcedureStoreTracker();
  assertTrue(tracker.isEmpty());
  final int MIN_PROC = 1;
  final int MAX_PROC = 1 << 10;
  // Insert phase: everything up to i is live, everything after is not NO.
  for (int i = MIN_PROC; i < MAX_PROC; ++i) {
    tracker.insert(i);
    for (int j = MIN_PROC; j <= i; ++j) {
      assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(j));
    }
    for (int j = i + 1; j < MAX_PROC; ++j) {
      assertTrue(tracker.isDeleted(j) != ProcedureStoreTracker.DeleteState.NO);
    }
  }
  // Delete phase: everything up to i is YES, everything after is still NO.
  for (int i = MIN_PROC; i < MAX_PROC; ++i) {
    tracker.delete(i);
    for (int j = MIN_PROC; j <= i; ++j) {
      assertEquals(ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(j));
    }
    for (int j = i + 1; j < MAX_PROC; ++j) {
      assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(j));
    }
  }
  assertTrue(tracker.isEmpty());
}

IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Basic insert/update/delete lifecycle: the tracker reports "updated" only
 * once every tracked id has been touched, and becomes empty only after the
 * last id is deleted.
 */
@Test
public void testBasicCRUD() {
  ProcedureStoreTracker tracker = new ProcedureStoreTracker();
  assertTrue(tracker.isEmpty());
  long[] ids = new long[]{1, 2, 3, 4, 5, 6};
  tracker.insert(ids[0]);
  // Insert a parent with three children in one call.
  tracker.insert(ids[1], new long[]{ids[2], ids[3], ids[4]});
  assertFalse(tracker.isEmpty());
  assertTrue(tracker.isUpdated());
  tracker.resetUpdates();
  assertFalse(tracker.isUpdated());
  // Updating only a subset keeps isUpdated() false.
  for (int k = 0; k < 4; ++k) {
    tracker.update(ids[k]);
    assertFalse(tracker.isEmpty());
    assertFalse(tracker.isUpdated());
  }
  // The fifth update touches the last tracked id: now fully updated.
  tracker.update(ids[4]);
  assertFalse(tracker.isEmpty());
  assertTrue(tracker.isUpdated());
  tracker.update(ids[5]);
  assertFalse(tracker.isEmpty());
  assertTrue(tracker.isUpdated());
  for (int k = 0; k < 5; ++k) {
    tracker.delete(ids[k]);
    assertFalse(tracker.isEmpty());
    assertTrue(tracker.isUpdated());
  }
  tracker.delete(ids[5]);
  assertTrue(tracker.isEmpty());
}

Class: org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureStore

TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * WAL-store fixture: fresh store under the test dir, lease recovered and an
 * (empty) load performed so each test starts from a clean, open store.
 */
@Before
public void setUp() throws IOException {
  htu = new HBaseCommonTestingUtility();
  testDir = htu.getDataTestDir();
  fs = testDir.getFileSystem(htu.getConfiguration());
  // Guard against accidentally pointing at a filesystem root.
  assertTrue(testDir.depth() > 1);
  logDir = new Path(testDir, "proc-logs");
  procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir);
  procStore.start(PROCEDURE_STORE_SLOTS);
  procStore.recoverLease();
  procStore.load(new LoadCounter());
}

InternalCallVerifier EqualityVerifier 
/**
 * Inserts procedures (one with children), restarts to verify the surviving
 * id set, then corrupts the tail of every log and verifies the same ids are
 * still recovered on reload.
 */
@Test
public void testLoad() throws Exception {
  Set procIds = new HashSet<>();
  Procedure proc1 = new TestSequentialProcedure();
  procIds.add(proc1.getProcId());
  procStore.insert(proc1, null);
  Procedure proc2 = new TestSequentialProcedure();
  Procedure[] child2 = new Procedure[2];
  child2[0] = new TestSequentialProcedure();
  child2[1] = new TestSequentialProcedure();
  procIds.add(proc2.getProcId());
  procIds.add(child2[0].getProcId());
  procIds.add(child2[1].getProcId());
  procStore.insert(proc2, child2);
  verifyProcIdsOnRestart(procIds);
  // Update two procs and delete one child; the deleted id must vanish.
  procStore.update(proc1);
  procStore.update(child2[1]);
  procStore.delete(child2[1].getProcId());
  procIds.remove(child2[1].getProcId());
  verifyProcIdsOnRestart(procIds);
  procStore.stop(false);
  FileStatus[] logs = fs.listStatus(logDir);
  assertEquals(3, logs.length);
  // Chop the last 4 bytes off each log; recovery must still load all ids.
  for (int k = 0; k < logs.length; ++k) {
    corruptLog(logs[k], 4);
  }
  verifyProcIdsOnRestart(procIds);
}

InternalCallVerifier EqualityVerifier 
/**
 * Rolls the WAL between operations, truncates every log's trailer, and then
 * restarts twice: both restarts must load exactly one live procedure with no
 * corruption reported; a final delete + periodic roll compacts to one log.
 */
@Test
public void testNoTrailerDoubleRestart() throws Exception {
  Procedure proc0 = new TestSequentialProcedure();
  procStore.insert(proc0, null);
  Procedure proc1 = new TestSequentialProcedure();
  procStore.insert(proc1, null);
  Procedure proc2 = new TestSequentialProcedure();
  procStore.insert(proc2, null);
  // Spread the operations across separate logs via forced rolls.
  procStore.rollWriterForTesting();
  procStore.delete(proc1.getProcId());
  procStore.rollWriterForTesting();
  procStore.update(proc2);
  procStore.rollWriterForTesting();
  procStore.delete(proc2.getProcId());
  procStore.stop(false);
  FileStatus[] logs = fs.listStatus(logDir);
  assertEquals(4, logs.length);
  // Chop each log's trailer (last 4 bytes).
  for (int k = 0; k < logs.length; ++k) {
    corruptLog(logs[k], 4);
  }
  LoadCounter loader = new LoadCounter();
  storeRestart(loader);
  assertEquals(1, loader.getLoadedCount());
  assertEquals(0, loader.getCorruptedCount());
  // Restart adds a fresh log on top of the four truncated ones.
  assertEquals(5, fs.listStatus(logDir).length);
  loader = new LoadCounter();
  storeRestart(loader);
  assertEquals(1, loader.getLoadedCount());
  assertEquals(0, loader.getCorruptedCount());
  // With the last procedure gone, a periodic roll compacts to a single log.
  procStore.delete(proc0.getProcId());
  procStore.periodicRollForTesting();
  assertEquals(1, fs.listStatus(logDir).length);
  storeRestart(loader);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the WAL store with multiple threads concurrently running
 * insert/update/delete cycles, then checks the tracker ends up empty and
 * the logs were compacted down to a single active file.
 *
 * Fixes over the previous version: the inner update loop no longer shadows
 * the outer thread-index variable {@code i}, and an InterruptedException
 * during the jittered sleep re-sets the interrupt flag instead of being
 * silently swallowed.
 */
@Test
public void testInsertUpdateDelete() throws Exception {
  final int NTHREAD = 2;

  // Recreate the store with periodic roll and a small roll threshold so
  // log rolling actually happens during the run.
  procStore.stop(false);
  fs.delete(logDir, true);
  org.apache.hadoop.conf.Configuration conf =
      new org.apache.hadoop.conf.Configuration(htu.getConfiguration());
  conf.setBoolean("hbase.procedure.store.wal.use.hsync", false);
  conf.setInt("hbase.procedure.store.wal.periodic.roll.msec", 10000);
  conf.setInt("hbase.procedure.store.wal.roll.threshold", 128 * 1024);
  fs.mkdirs(logDir);
  procStore = ProcedureTestingUtility.createWalStore(conf, fs, logDir);
  procStore.start(NTHREAD);
  procStore.recoverLease();

  // A fresh store must load empty.
  LoadCounter loader = new LoadCounter();
  procStore.load(loader);
  assertEquals(0, loader.getMaxProcId());
  assertEquals(0, loader.getLoadedCount());
  assertEquals(0, loader.getCorruptedCount());

  final long LAST_PROC_ID = 9999;
  final Thread[] thread = new Thread[NTHREAD];
  // Random base so proc ids differ across runs. Math.round(double) already
  // returns long, so no cast is needed.
  final AtomicLong procCounter = new AtomicLong(Math.round(Math.random() * 100));
  for (int i = 0; i < thread.length; ++i) {
    thread[i] = new Thread() {
      @Override
      public void run() {
        Random rand = new Random();
        TestProcedure proc;
        do {
          // Insert, update a random number of times, then delete.
          proc = new TestProcedure(procCounter.addAndGet(1));
          procStore.insert(proc, null);
          for (int u = 0, nupdates = rand.nextInt(10); u <= nupdates; ++u) {
            try {
              Thread.sleep(0, rand.nextInt(15));
            } catch (InterruptedException e) {
              // Preserve the interrupt status; the worker keeps going.
              Thread.currentThread().interrupt();
            }
            procStore.update(proc);
          }
          procStore.delete(proc.getProcId());
        } while (proc.getProcId() < LAST_PROC_ID);
      }
    };
    thread[i].start();
  }
  for (int i = 0; i < thread.length; ++i) {
    thread[i].join();
  }

  procStore.getStoreTracker().dump();
  assertTrue(procCounter.get() >= LAST_PROC_ID);
  // Every inserted procedure was deleted, so the tracker must be empty and
  // all but the current log must have been removed.
  assertTrue(procStore.getStoreTracker().isEmpty());
  assertEquals(1, procStore.getActiveLogs().size());
}

InternalCallVerifier EqualityVerifier 
/** Restarting over an empty log directory must report nothing loaded. */
@Test
public void testEmptyLogLoad() throws Exception {
  LoadCounter counter = new LoadCounter();
  storeRestart(counter);
  assertEquals(0, counter.getMaxProcId());
  assertEquals(0, counter.getLoadedCount());
  assertEquals(0, counter.getCorruptedCount());
}

InternalCallVerifier EqualityVerifier 
/** Rolling repeatedly with nothing to write must not pile up log files. */
@Test
public void testEmptyRoll() throws Exception {
  int rolls = 10;
  while (rolls-- > 0) {
    procStore.periodicRollForTesting();
  }
  // Only the single current log should exist.
  FileStatus[] logFiles = fs.listStatus(logDir);
  assertEquals(1, logFiles.length);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Corrupting the middle of the only log must mark that log as corrupted
 * while still loading every entry written before the damaged offset.
 */
@Test
public void testCorruptedEntries() throws Exception {
  int written = 0;
  while (written++ < 100) {
    procStore.insert(new TestSequentialProcedure(), null);
  }
  procStore.stop(false);

  FileStatus[] logFiles = fs.listStatus(logDir);
  assertEquals(1, logFiles.length);
  // Damage 1823 bytes: enough to destroy the entries past some offset.
  corruptLog(logFiles[0], 1823);

  LoadCounter counter = new LoadCounter();
  storeRestart(counter);
  assertTrue(procStore.getCorruptedLogs() != null);
  assertEquals(1, procStore.getCorruptedLogs().size());
  // Entries before the damage (85 of 100) are still recoverable.
  assertEquals(85, counter.getLoadedCount());
  assertEquals(0, counter.getCorruptedCount());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes root procedures into an older log and their children into newer
 * ones, then deletes the oldest log: the children must be reported as
 * corrupted because their parents were lost with the deleted log.
 *
 * Fix over the previous version: the raw {@code Comparator} is now a
 * properly parameterized {@code Comparator<FileStatus>}, removing the
 * unchecked casts inside {@code compare}.
 */
@Test
public void testCorruptedProcedures() throws Exception {
  // Insert 10 root procedures (ids 1..10), each updated with a stack id.
  TestProcedure[] rootProcs = new TestProcedure[10];
  for (int i = 1; i <= rootProcs.length; i++) {
    rootProcs[i - 1] = new TestProcedure(i, 0);
    procStore.insert(rootProcs[i - 1], null);
    rootProcs[i - 1].addStackId(0);
    procStore.update(rootProcs[i - 1]);
  }
  procStore.rollWriterForTesting();

  // Insert a child (ids 11..20) for each root into the next log.
  for (int i = 1; i <= rootProcs.length; i++) {
    TestProcedure b = new TestProcedure(rootProcs.length + i, i);
    rootProcs[i - 1].addStackId(1);
    procStore.insert(rootProcs[i - 1], new Procedure[] { b });
  }
  procStore.rollWriterForTesting();

  // Update the children again after another roll.
  for (int i = 1; i <= rootProcs.length; i++) {
    procStore.update(new TestProcedure(rootProcs.length + i, i));
  }
  procStore.stop(false);

  FileStatus[] logs = fs.listStatus(logDir);
  assertEquals(Arrays.toString(logs), 2, logs.length);
  // Sort by file name so logs[0] is the oldest remaining log.
  Arrays.sort(logs, new Comparator<FileStatus>() {
    @Override
    public int compare(FileStatus o1, FileStatus o2) {
      return o1.getPath().getName().compareTo(o2.getPath().getName());
    }
  });

  // With both logs intact everything loads cleanly.
  LoadCounter loader = new LoadCounter();
  storeRestart(loader);
  assertEquals(rootProcs.length * 2, loader.getLoadedCount());
  assertEquals(0, loader.getCorruptedCount());

  // Drop the oldest log: the roots are gone, so every child (whose parent
  // id points at a lost root) must surface as corrupted.
  fs.delete(logs[0].getPath(), false);
  loader.reset();
  storeRestart(loader);
  assertEquals(0, loader.getLoadedCount());
  assertEquals(rootProcs.length, loader.getCorruptedCount());
  for (Procedure proc : loader.getCorrupted()) {
    assertTrue(proc.toString(), proc.getParentProcId() <= rootProcs.length);
    assertTrue(proc.toString(),
        proc.getProcId() > rootProcs.length && proc.getProcId() <= (rootProcs.length * 2));
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Checks that a log becomes removable once every procedure it contains has
 * been superseded (updated or deleted) in a newer log.
 */
@Test
public void testRollAndRemove() throws IOException {
  Procedure first = new TestSequentialProcedure();
  procStore.insert(first, null);
  Procedure second = new TestSequentialProcedure();
  procStore.insert(second, null);

  // After a roll, the old log still holds live state: two active logs.
  procStore.rollWriterForTesting();
  assertEquals(2, procStore.getActiveLogs().size());

  // Rewriting both procedures makes the old log obsolete.
  procStore.update(first);
  procStore.update(second);
  assertEquals(1, procStore.getActiveLogs().size());

  procStore.rollWriterForTesting();
  assertEquals(2, procStore.getActiveLogs().size());

  // Deleting both procedures obsoletes the previous log as well.
  procStore.delete(first.getProcId());
  procStore.delete(second.getProcId());
  assertEquals(1, procStore.getActiveLogs().size());
}

InternalCallVerifier EqualityVerifier 
/**
 * Damaging only the last 4 bytes of a log must not lose any entries: all
 * 100 inserted procedures are expected to load after restart.
 */
@Test
public void testCorruptedTrailer() throws Exception {
  int inserted = 0;
  while (inserted++ < 100) {
    procStore.insert(new TestSequentialProcedure(), null);
  }
  procStore.stop(false);

  FileStatus[] logFiles = fs.listStatus(logDir);
  assertEquals(1, logFiles.length);
  corruptLog(logFiles[0], 4);

  LoadCounter counter = new LoadCounter();
  storeRestart(counter);
  assertEquals(100, counter.getLoadedCount());
  assertEquals(0, counter.getCorruptedCount());
}

Class: org.apache.hadoop.hbase.procedure2.util.TestTimeoutBlockingQueue

IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * For every prefix of the timeout array, fills the queue and then drains
 * it, asserting elements come out in non-decreasing timeout order.
 *
 * Fix: the queue is declared with its type parameter restored
 * ({@code TimeoutBlockingQueue<TestObject>}); the raw declaration did not
 * even compile, since raw {@code poll()} returns {@code Object} which
 * cannot be assigned to {@code TestObject} without a cast.
 */
@Test
public void testOrder() {
  TimeoutBlockingQueue<TestObject> queue =
      new TimeoutBlockingQueue<TestObject>(8, new TestObjectTimeoutRetriever());
  long[] timeouts = new long[] { 500, 200, 700, 300, 600, 600, 200, 800, 500 };
  for (int i = 0; i < timeouts.length; ++i) {
    // Add the first i+1 objects...
    for (int j = 0; j <= i; ++j) {
      queue.add(new TestObject(j, timeouts[j]));
      queue.dump();
    }
    // ...then drain them, verifying sorted-by-timeout order.
    long prev = 0;
    for (int j = 0; j <= i; ++j) {
      TestObject obj = queue.poll();
      assertTrue(obj.getTimeout() >= prev);
      prev = obj.getTimeout();
      queue.dump();
    }
  }
}

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies the queue returns elements in timeout order regardless of
 * insertion order, including after an initial add/poll round-trip.
 *
 * Fix: the queue declarations carry their type parameter
 * ({@code TimeoutBlockingQueue<TestObject>}); the raw form did not compile
 * because raw {@code poll()} returns {@code Object}, which cannot be
 * dereferenced as {@code getTimeout()} without a cast.
 */
@Test
public void testTimeoutBlockingQueue() {
  TimeoutBlockingQueue<TestObject> queue;
  int[][] testArray = new int[][] { { 200, 400, 600 }, { 200, 400, 100 }, { 200, 400, 300 } };
  for (int i = 0; i < testArray.length; ++i) {
    int[] sortedArray = Arrays.copyOf(testArray[i], testArray[i].length);
    Arrays.sort(sortedArray);

    // Fill then fully drain: output must match the sorted timeouts.
    queue = new TimeoutBlockingQueue<TestObject>(2, new TestObjectTimeoutRetriever());
    for (int j = 0; j < testArray[i].length; ++j) {
      queue.add(new TestObject(j, testArray[i][j]));
      queue.dump();
    }
    for (int j = 0; !queue.isEmpty(); ++j) {
      assertEquals(sortedArray[j], queue.poll().getTimeout());
    }

    // Same check on a fresh queue after an initial add/poll round-trip.
    queue = new TimeoutBlockingQueue<TestObject>(2, new TestObjectTimeoutRetriever());
    queue.add(new TestObject(0, 50));
    assertEquals(50, queue.poll().getTimeout());
    for (int j = 0; j < testArray[i].length; ++j) {
      queue.add(new TestObject(j, testArray[i][j]));
      queue.dump();
    }
    for (int j = 0; !queue.isEmpty(); ++j) {
      assertEquals(sortedArray[j], queue.poll().getTimeout());
    }
  }
}

Class: org.apache.hadoop.hbase.protobuf.TestProtobufUtil

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests Put &lt;-&gt; MutationProto round-trip conversion: the proto is
 * converted to a client Put and back, and must equal the original once
 * implicit per-cell timestamps are filled in.
 * @throws IOException on conversion failure
 */
@Test public void testPut() throws IOException {
  MutationProto.Builder mutateBuilder=MutationProto.newBuilder();
  mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
  mutateBuilder.setMutateType(MutationType.PUT);
  mutateBuilder.setTimestamp(111111);
  ColumnValue.Builder valueBuilder=ColumnValue.newBuilder();
  valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
  QualifierValue.Builder qualifierBuilder=QualifierValue.newBuilder();
  // c1 carries no per-cell timestamp; c2 sets its own (222222).
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
  qualifierBuilder.setValue(ByteString.copyFromUtf8("v1"));
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
  qualifierBuilder.setValue(ByteString.copyFromUtf8("v2"));
  qualifierBuilder.setTimestamp(222222);
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  mutateBuilder.addColumnValue(valueBuilder.build());
  MutationProto proto=mutateBuilder.build();
  // Durability defaults to USE_DEFAULT on a freshly built proto.
  assertEquals(MutationProto.Durability.USE_DEFAULT,proto.getDurability());
  // Make durability explicit in the expected proto before comparing.
  mutateBuilder=MutationProto.newBuilder(proto);
  mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
  Put put=ProtobufUtil.toPut(proto);
  long timestamp=put.getTimeStamp();
  // Cells without an explicit timestamp inherit the Put's timestamp;
  // mirror that in the expected proto so the equality check passes.
  for ( ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) {
    for ( QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) {
      if (!qualifier.hasTimestamp()) {
        qualifier.setTimestamp(timestamp);
      }
    }
  }
  assertEquals(mutateBuilder.build(),ProtobufUtil.toMutation(MutationType.PUT,put));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests Append &lt;-&gt; MutationProto round-trip conversion; the expected
 * proto's timestamp is aligned with the converted Append's timestamp
 * before comparing.
 * @throws IOException on conversion failure
 */
@Test public void testAppend() throws IOException {
  long timeStamp=111111;
  MutationProto.Builder mutateBuilder=MutationProto.newBuilder();
  mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
  mutateBuilder.setMutateType(MutationType.APPEND);
  mutateBuilder.setTimestamp(timeStamp);
  ColumnValue.Builder valueBuilder=ColumnValue.newBuilder();
  valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
  QualifierValue.Builder qualifierBuilder=QualifierValue.newBuilder();
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
  qualifierBuilder.setValue(ByteString.copyFromUtf8("v1"));
  qualifierBuilder.setTimestamp(timeStamp);
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
  qualifierBuilder.setValue(ByteString.copyFromUtf8("v2"));
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  // NOTE(review): this setTimestamp runs AFTER c2 was already built and
  // added above, so c2 goes into the proto without a timestamp — confirm
  // whether that ordering is intentional.
  qualifierBuilder.setTimestamp(timeStamp);
  mutateBuilder.addColumnValue(valueBuilder.build());
  MutationProto proto=mutateBuilder.build();
  // Durability defaults to USE_DEFAULT on a freshly built proto.
  assertEquals(MutationProto.Durability.USE_DEFAULT,proto.getDurability());
  mutateBuilder=MutationProto.newBuilder(proto);
  mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
  Append append=ProtobufUtil.toAppend(proto,null);
  // The converted Append carries its own timestamp; mirror it in the
  // expected proto before the equality check.
  mutateBuilder.setTimestamp(append.getTimeStamp());
  assertEquals(mutateBuilder.build(),ProtobufUtil.toMutation(MutationType.APPEND,append));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests Get &lt;-&gt; protobuf round-trip conversion, covering a family with
 * explicit qualifiers and a whole-family column, plus the proto defaults
 * for max-versions and cache-blocks.
 * @throws IOException on conversion failure
 */
@Test public void testGet() throws IOException {
  ClientProtos.Get.Builder getBuilder=ClientProtos.Get.newBuilder();
  getBuilder.setRow(ByteString.copyFromUtf8("row"));
  // f1 with two explicit qualifiers...
  Column.Builder columnBuilder=Column.newBuilder();
  columnBuilder.setFamily(ByteString.copyFromUtf8("f1"));
  columnBuilder.addQualifier(ByteString.copyFromUtf8("c1"));
  columnBuilder.addQualifier(ByteString.copyFromUtf8("c2"));
  getBuilder.addColumn(columnBuilder.build());
  // ...and f2 with none (whole family).
  columnBuilder.clear();
  columnBuilder.setFamily(ByteString.copyFromUtf8("f2"));
  getBuilder.addColumn(columnBuilder.build());
  ClientProtos.Get proto=getBuilder.build();
  // Proto defaults: one version, block caching enabled.
  assertEquals(1,proto.getMaxVersions());
  assertEquals(true,proto.getCacheBlocks());
  // Set the defaults explicitly on the expected proto — presumably the
  // reverse conversion always writes these fields.
  getBuilder=ClientProtos.Get.newBuilder(proto);
  getBuilder.setMaxVersions(1);
  getBuilder.setCacheBlocks(true);
  Get get=ProtobufUtil.toGet(proto);
  assertEquals(getBuilder.build(),ProtobufUtil.toGet(get));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests Delete &lt;-&gt; MutationProto round-trip conversion with one
 * single-version and one multi-version delete marker; delete markers carry
 * no value, so the expected qualifiers are given an empty value before
 * comparing.
 * @throws IOException on conversion failure
 */
@Test public void testDelete() throws IOException {
  MutationProto.Builder mutateBuilder=MutationProto.newBuilder();
  mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
  mutateBuilder.setMutateType(MutationType.DELETE);
  mutateBuilder.setTimestamp(111111);
  ColumnValue.Builder valueBuilder=ColumnValue.newBuilder();
  valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
  QualifierValue.Builder qualifierBuilder=QualifierValue.newBuilder();
  // c1: delete one version at ts 111222.
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
  qualifierBuilder.setDeleteType(DeleteType.DELETE_ONE_VERSION);
  qualifierBuilder.setTimestamp(111222);
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  // c2: delete multiple versions at ts 111333.
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
  qualifierBuilder.setDeleteType(DeleteType.DELETE_MULTIPLE_VERSIONS);
  qualifierBuilder.setTimestamp(111333);
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  mutateBuilder.addColumnValue(valueBuilder.build());
  MutationProto proto=mutateBuilder.build();
  // Durability defaults to USE_DEFAULT on a freshly built proto.
  assertEquals(MutationProto.Durability.USE_DEFAULT,proto.getDurability());
  mutateBuilder=MutationProto.newBuilder(proto);
  mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
  Delete delete=ProtobufUtil.toDelete(proto);
  // The serialized Delete always carries an (empty) value per qualifier;
  // mirror that in the expected proto.
  for ( ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) {
    for ( QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) {
      qualifier.setValue(ByteString.EMPTY);
    }
  }
  assertEquals(mutateBuilder.build(),ProtobufUtil.toMutation(MutationType.DELETE,delete));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests Increment &lt;-&gt; MutationProto round-trip conversion with two
 * long-encoded counter columns (11 and 22).
 * @throws IOException on conversion failure
 */
@Test public void testIncrement() throws IOException {
  MutationProto.Builder mutateBuilder=MutationProto.newBuilder();
  mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
  mutateBuilder.setMutateType(MutationType.INCREMENT);
  ColumnValue.Builder valueBuilder=ColumnValue.newBuilder();
  valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
  QualifierValue.Builder qualifierBuilder=QualifierValue.newBuilder();
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
  qualifierBuilder.setValue(ByteStringer.wrap(Bytes.toBytes(11L)));
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
  qualifierBuilder.setValue(ByteStringer.wrap(Bytes.toBytes(22L)));
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  mutateBuilder.addColumnValue(valueBuilder.build());
  MutationProto proto=mutateBuilder.build();
  // Durability defaults to USE_DEFAULT on a freshly built proto.
  assertEquals(MutationProto.Durability.USE_DEFAULT,proto.getDurability());
  mutateBuilder=MutationProto.newBuilder(proto);
  mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
  Increment increment=ProtobufUtil.toIncrement(proto,null);
  // Convert back without a nonce and compare against the expected proto.
  assertEquals(mutateBuilder.build(),ProtobufUtil.toMutation(increment,MutationProto.newBuilder(),HConstants.NO_NONCE));
}

InternalCallVerifier EqualityVerifier 
/**
 * ProtobufUtil.toException must rebuild a throwable of the named class —
 * both a plain IOException and a Hadoop RemoteException — carrying the
 * serialized message.
 */
@Test
public void testException() throws IOException {
  final String omg = "OMG!!!";

  NameBytesPair.Builder pairBuilder = NameBytesPair.newBuilder();
  pairBuilder.setName("java.io.IOException");
  pairBuilder.setValue(ByteStringer.wrap(Bytes.toBytes(omg)));
  Throwable rebuilt = ProtobufUtil.toException(pairBuilder.build());
  assertEquals(omg, rebuilt.getMessage());

  pairBuilder.clear();
  pairBuilder.setName("org.apache.hadoop.ipc.RemoteException");
  pairBuilder.setValue(ByteStringer.wrap(Bytes.toBytes(omg)));
  rebuilt = ProtobufUtil.toException(pairBuilder.build());
  assertEquals(omg, rebuilt.getMessage());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests Scan &lt;-&gt; protobuf round-trip conversion with a row range, two
 * column specs, and explicit max-versions / cache-blocks / caching values.
 * @throws IOException on conversion failure
 */
@Test public void testScan() throws IOException {
  ClientProtos.Scan.Builder scanBuilder=ClientProtos.Scan.newBuilder();
  scanBuilder.setStartRow(ByteString.copyFromUtf8("row1"));
  scanBuilder.setStopRow(ByteString.copyFromUtf8("row2"));
  // f1 with two explicit qualifiers, f2 as a whole family.
  Column.Builder columnBuilder=Column.newBuilder();
  columnBuilder.setFamily(ByteString.copyFromUtf8("f1"));
  columnBuilder.addQualifier(ByteString.copyFromUtf8("c1"));
  columnBuilder.addQualifier(ByteString.copyFromUtf8("c2"));
  scanBuilder.addColumn(columnBuilder.build());
  columnBuilder.clear();
  columnBuilder.setFamily(ByteString.copyFromUtf8("f2"));
  scanBuilder.addColumn(columnBuilder.build());
  ClientProtos.Scan proto=scanBuilder.build();
  // Proto defaults: one version, block caching enabled.
  assertEquals(1,proto.getMaxVersions());
  assertEquals(true,proto.getCacheBlocks());
  // Override the defaults so the round trip exercises non-default values.
  scanBuilder=ClientProtos.Scan.newBuilder(proto);
  scanBuilder.setMaxVersions(2);
  scanBuilder.setCacheBlocks(false);
  scanBuilder.setCaching(1024);
  ClientProtos.Scan expectedProto=scanBuilder.build();
  // proto -> Scan -> proto must be lossless.
  ClientProtos.Scan actualProto=ProtobufUtil.toScan(ProtobufUtil.toScan(expectedProto));
  assertEquals(expectedProto,actualProto);
}

Class: org.apache.hadoop.hbase.protobuf.TestReplicationProtobuf

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** * Little test to check we can basically convert list of a list of KVs into a CellScanner * @throws IOException */ @Test public void testGetCellScanner() throws IOException { List a=new ArrayList(); KeyValue akv=new KeyValue(Bytes.toBytes("a"),-1L); a.add(akv); a.add(new KeyValue(Bytes.toBytes("aa"),-1L)); a.add(new KeyValue(Bytes.toBytes("aaa"),-1L)); List b=new ArrayList(); KeyValue bkv=new KeyValue(Bytes.toBytes("b"),-1L); a.add(bkv); List c=new ArrayList(); KeyValue ckv=new KeyValue(Bytes.toBytes("c"),-1L); c.add(ckv); List> all=new ArrayList>(); all.add(a); all.add(b); all.add(c); CellScanner scanner=ReplicationProtbufUtil.getCellScanner(all,0); testAdvancetHasSameRow(scanner,akv); scanner.advance(); scanner.advance(); testAdvancetHasSameRow(scanner,bkv); testAdvancetHasSameRow(scanner,ckv); assertFalse(scanner.advance()); }

Class: org.apache.hadoop.hbase.quotas.TestQuotaAdmin

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Sets READ_NUMBER and WRITE_NUMBER throttles plus a global bypass for the
 * current user, verifies all three through the QuotaRetriever scanner,
 * then removes everything and checks the table is emptied.
 */
@Test public void testThrottleType() throws Exception {
  Admin admin=TEST_UTIL.getHBaseAdmin();
  String userName=User.getCurrent().getShortName();
  // Distinct soft limits (6 vs 12) let us tell the two throttles apart
  // when scanning the settings back.
  admin.setQuota(QuotaSettingsFactory.throttleUser(userName,ThrottleType.READ_NUMBER,6,TimeUnit.MINUTES));
  admin.setQuota(QuotaSettingsFactory.throttleUser(userName,ThrottleType.WRITE_NUMBER,12,TimeUnit.MINUTES));
  admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName,true));
  try (QuotaRetriever scanner=QuotaRetriever.open(TEST_UTIL.getConfiguration())){
    int countThrottle=0;
    int countGlobalBypass=0;
    for ( QuotaSettings settings : scanner) {
      switch (settings.getQuotaType()) {
        case THROTTLE:
          ThrottleSettings throttle=(ThrottleSettings)settings;
          // Identify each throttle by its soft limit and check its type.
          if (throttle.getSoftLimit() == 6) {
            assertEquals(ThrottleType.READ_NUMBER,throttle.getThrottleType());
          } else if (throttle.getSoftLimit() == 12) {
            assertEquals(ThrottleType.WRITE_NUMBER,throttle.getThrottleType());
          } else {
            fail("should not come here, because don't set quota with this limit");
          }
          // User-level throttle: no table or namespace scoping.
          assertEquals(userName,throttle.getUserName());
          assertEquals(null,throttle.getTableName());
          assertEquals(null,throttle.getNamespace());
          assertEquals(TimeUnit.MINUTES,throttle.getTimeUnit());
          countThrottle++;
          break;
        case GLOBAL_BYPASS:
          countGlobalBypass++;
          break;
        default :
          fail("unexpected settings type: " + settings.getQuotaType());
      }
    }
    assertEquals(2,countThrottle);
    assertEquals(1,countGlobalBypass);
  }
  // Removing the throttles leaves only the bypass entry, then nothing.
  admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName));
  assertNumResults(1,null);
  admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName,false));
  assertNumResults(0,null);
}

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Sets a single REQUEST_NUMBER throttle plus a global bypass for the
 * current user, verifies both via the QuotaRetriever scanner, then removes
 * everything and checks the quota table is emptied.
 */
@Test public void testSimpleScan() throws Exception {
  Admin admin=TEST_UTIL.getHBaseAdmin();
  String userName=User.getCurrent().getShortName();
  admin.setQuota(QuotaSettingsFactory.throttleUser(userName,ThrottleType.REQUEST_NUMBER,6,TimeUnit.MINUTES));
  admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName,true));
  try (QuotaRetriever scanner=QuotaRetriever.open(TEST_UTIL.getConfiguration())){
    int countThrottle=0;
    int countGlobalBypass=0;
    for ( QuotaSettings settings : scanner) {
      LOG.debug(settings);
      switch (settings.getQuotaType()) {
        case THROTTLE:
          // User-level throttle: no table/namespace scoping, limit 6/min.
          ThrottleSettings throttle=(ThrottleSettings)settings;
          assertEquals(userName,throttle.getUserName());
          assertEquals(null,throttle.getTableName());
          assertEquals(null,throttle.getNamespace());
          assertEquals(6,throttle.getSoftLimit());
          assertEquals(TimeUnit.MINUTES,throttle.getTimeUnit());
          countThrottle++;
          break;
        case GLOBAL_BYPASS:
          countGlobalBypass++;
          break;
        default :
          fail("unexpected settings type: " + settings.getQuotaType());
      }
    }
    assertEquals(1,countThrottle);
    assertEquals(1,countGlobalBypass);
  }
  // Removing the throttle leaves only the bypass entry, then nothing.
  admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName));
  assertNumResults(1,null);
  admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName,false));
  assertNumResults(0,null);
}

Class: org.apache.hadoop.hbase.quotas.TestQuotaState

InternalCallVerifier BooleanVerifier 
/**
 * A fresh UserQuotaState bypasses everything; setting a global and a
 * per-table throttle activates the corresponding limiters, and unknown
 * tables fall back to the global limiter.
 */
@Test(timeout = 60000)
public void testSimpleQuotaStateOperation() {
  final TableName tableName = TableName.valueOf("testSimpleQuotaStateOperationTable");
  final int NUM_GLOBAL_THROTTLE = 3;
  final int NUM_TABLE_THROTTLE = 2;

  UserQuotaState state = new UserQuotaState();
  assertTrue(state.isBypass());

  state.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE));
  assertFalse(state.isBypass());
  state.setQuotas(tableName, buildReqNumThrottle(NUM_TABLE_THROTTLE));
  assertFalse(state.isBypass());

  // An unknown table resolves to the very same global limiter instance.
  assertTrue(state.getGlobalLimiter() == state.getTableLimiter(UNKNOWN_TABLE_NAME));
  assertThrottleException(state.getTableLimiter(UNKNOWN_TABLE_NAME), NUM_GLOBAL_THROTTLE);
  assertThrottleException(state.getTableLimiter(tableName), NUM_TABLE_THROTTLE);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Applies three successive updates to a UserQuotaState: the first installs
 * table throttles for A and B, the second replaces them with throttles for
 * A and C (B becomes noop), and the third — a bypass state — clears
 * everything.
 */
@Test(timeout=60000) public void testQuotaStateUpdateTableThrottle() {
  final TableName TABLE_A=TableName.valueOf("TableA");
  final TableName TABLE_B=TableName.valueOf("TableB");
  final TableName TABLE_C=TableName.valueOf("TableC");
  final int TABLE_A_THROTTLE_1=3;
  final int TABLE_A_THROTTLE_2=11;
  final int TABLE_B_THROTTLE=4;
  final int TABLE_C_THROTTLE=5;
  final long LAST_UPDATE_1=10;
  final long LAST_UPDATE_2=20;
  final long LAST_UPDATE_3=30;
  // Fresh state: bypass, never updated.
  UserQuotaState quotaInfo=new UserQuotaState();
  assertEquals(0,quotaInfo.getLastUpdate());
  assertTrue(quotaInfo.isBypass());
  // Update 1: throttles on tables A and B.
  UserQuotaState otherQuotaState=new UserQuotaState(LAST_UPDATE_1);
  otherQuotaState.setQuotas(TABLE_A,buildReqNumThrottle(TABLE_A_THROTTLE_1));
  otherQuotaState.setQuotas(TABLE_B,buildReqNumThrottle(TABLE_B_THROTTLE));
  assertEquals(LAST_UPDATE_1,otherQuotaState.getLastUpdate());
  assertFalse(otherQuotaState.isBypass());
  quotaInfo.update(otherQuotaState);
  assertEquals(LAST_UPDATE_1,quotaInfo.getLastUpdate());
  assertFalse(quotaInfo.isBypass());
  assertThrottleException(quotaInfo.getTableLimiter(TABLE_A),TABLE_A_THROTTLE_1);
  assertThrottleException(quotaInfo.getTableLimiter(TABLE_B),TABLE_B_THROTTLE);
  assertNoopLimiter(quotaInfo.getTableLimiter(TABLE_C));
  // Update 2: A gets a bigger limit, C appears, B must drop to noop.
  // The A check below expects only the delta (limit 11 minus the 3 already
  // consumed against the previous limiter).
  otherQuotaState=new UserQuotaState(LAST_UPDATE_2);
  otherQuotaState.setQuotas(TABLE_A,buildReqNumThrottle(TABLE_A_THROTTLE_2));
  otherQuotaState.setQuotas(TABLE_C,buildReqNumThrottle(TABLE_C_THROTTLE));
  assertEquals(LAST_UPDATE_2,otherQuotaState.getLastUpdate());
  assertFalse(otherQuotaState.isBypass());
  quotaInfo.update(otherQuotaState);
  assertEquals(LAST_UPDATE_2,quotaInfo.getLastUpdate());
  assertFalse(quotaInfo.isBypass());
  assertThrottleException(quotaInfo.getTableLimiter(TABLE_A),TABLE_A_THROTTLE_2 - TABLE_A_THROTTLE_1);
  assertThrottleException(quotaInfo.getTableLimiter(TABLE_C),TABLE_C_THROTTLE);
  assertNoopLimiter(quotaInfo.getTableLimiter(TABLE_B));
  // Update 3: a bypass state removes all throttles.
  otherQuotaState=new UserQuotaState(LAST_UPDATE_3);
  assertEquals(LAST_UPDATE_3,otherQuotaState.getLastUpdate());
  assertTrue(otherQuotaState.isBypass());
  quotaInfo.update(otherQuotaState);
  assertEquals(LAST_UPDATE_3,quotaInfo.getLastUpdate());
  assertTrue(quotaInfo.isBypass());
  assertNoopLimiter(quotaInfo.getTableLimiter(UNKNOWN_TABLE_NAME));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Updating a bypass state from another bypass state only refreshes the
 * last-update timestamp; every limiter remains a noop.
 */
@Test(timeout = 60000)
public void testQuotaStateUpdateBypassThrottle() {
  final long LAST_UPDATE = 10;

  UserQuotaState state = new UserQuotaState();
  assertEquals(0, state.getLastUpdate());
  assertTrue(state.isBypass());

  UserQuotaState incoming = new UserQuotaState(LAST_UPDATE);
  assertEquals(LAST_UPDATE, incoming.getLastUpdate());
  assertTrue(incoming.isBypass());

  state.update(incoming);
  assertEquals(LAST_UPDATE, state.getLastUpdate());
  assertTrue(state.isBypass());
  // Unknown tables resolve to the (noop) global limiter instance.
  assertTrue(state.getGlobalLimiter() == state.getTableLimiter(UNKNOWN_TABLE_NAME));
  assertNoopLimiter(state.getTableLimiter(UNKNOWN_TABLE_NAME));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Applies three successive updates to a QuotaState's global throttle: an
 * initial limit, a larger replacement limit (only the delta is available),
 * and finally a bypass state that clears the limiter.
 */
@Test(timeout=60000) public void testQuotaStateUpdateGlobalThrottle() {
  final int NUM_GLOBAL_THROTTLE_1=3;
  final int NUM_GLOBAL_THROTTLE_2=11;
  final long LAST_UPDATE_1=10;
  final long LAST_UPDATE_2=20;
  final long LAST_UPDATE_3=30;
  // Fresh state: bypass, never updated.
  QuotaState quotaInfo=new QuotaState();
  assertEquals(0,quotaInfo.getLastUpdate());
  assertTrue(quotaInfo.isBypass());
  // Update 1: install a global throttle of 3.
  QuotaState otherQuotaState=new QuotaState(LAST_UPDATE_1);
  otherQuotaState.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE_1));
  assertEquals(LAST_UPDATE_1,otherQuotaState.getLastUpdate());
  assertFalse(otherQuotaState.isBypass());
  quotaInfo.update(otherQuotaState);
  assertEquals(LAST_UPDATE_1,quotaInfo.getLastUpdate());
  assertFalse(quotaInfo.isBypass());
  assertThrottleException(quotaInfo.getGlobalLimiter(),NUM_GLOBAL_THROTTLE_1);
  // Update 2: raise the limit to 11; with 3 already consumed against the
  // previous limiter, only the delta (11 - 3) is available.
  otherQuotaState=new QuotaState(LAST_UPDATE_2);
  otherQuotaState.setQuotas(buildReqNumThrottle(NUM_GLOBAL_THROTTLE_2));
  assertEquals(LAST_UPDATE_2,otherQuotaState.getLastUpdate());
  assertFalse(otherQuotaState.isBypass());
  quotaInfo.update(otherQuotaState);
  assertEquals(LAST_UPDATE_2,quotaInfo.getLastUpdate());
  assertFalse(quotaInfo.isBypass());
  assertThrottleException(quotaInfo.getGlobalLimiter(),NUM_GLOBAL_THROTTLE_2 - NUM_GLOBAL_THROTTLE_1);
  // Update 3: a bypass state clears the global limiter.
  otherQuotaState=new QuotaState(LAST_UPDATE_3);
  assertEquals(LAST_UPDATE_3,otherQuotaState.getLastUpdate());
  assertTrue(otherQuotaState.isBypass());
  quotaInfo.update(otherQuotaState);
  assertEquals(LAST_UPDATE_3,quotaInfo.getLastUpdate());
  assertTrue(quotaInfo.isBypass());
  assertNoopLimiter(quotaInfo.getGlobalLimiter());
}

Class: org.apache.hadoop.hbase.quotas.TestRateLimiter

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * AverageIntervalRateLimiter: after over-consuming (20 against a 10/sec
 * budget), the wait interval is proportional to the deficit, and moving
 * the refill time back makes execution possible again.
 */
@Test
public void testOverconsumptionAverageIntervalRefillStrategy() {
  RateLimiter avgLimiter = new AverageIntervalRateLimiter();
  avgLimiter.set(10, TimeUnit.SECONDS);

  assertTrue(avgLimiter.canExecute());
  avgLimiter.consume(20);
  // 10 units over budget at 10/sec: 100ms per requested unit.
  assertEquals(100, avgLimiter.waitInterval(1));
  assertEquals(1000, avgLimiter.waitInterval(10));

  // Pretend 900ms passed: one unit becomes available.
  avgLimiter.setNextRefillTime(avgLimiter.getNextRefillTime() - 900);
  assertTrue(avgLimiter.canExecute(1));
  // Another 100ms closes the deficit entirely.
  avgLimiter.setNextRefillTime(avgLimiter.getNextRefillTime() - 100);
  assertTrue(avgLimiter.canExecute());
  assertEquals(0, avgLimiter.waitInterval());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FixedIntervalRateLimiter: after over-consuming (20 against a 10/sec
 * budget), nothing is admitted until a whole interval has elapsed — unlike
 * the average-interval strategy, 900ms is not enough for even one unit.
 */
@Test
public void testOverconsumptionFixedIntervalRefillStrategy() throws InterruptedException {
  RateLimiter fixLimiter = new FixedIntervalRateLimiter();
  fixLimiter.set(10, TimeUnit.SECONDS);

  assertTrue(fixLimiter.canExecute());
  fixLimiter.consume(20);
  // The wait is always a full interval, regardless of the amount asked.
  assertEquals(1000, fixLimiter.waitInterval(1));
  assertEquals(1000, fixLimiter.waitInterval(10));

  // 900ms elapsed: still inside the interval, so nothing is available.
  fixLimiter.setNextRefillTime(fixLimiter.getNextRefillTime() - 900);
  assertFalse(fixLimiter.canExecute(1));
  // The remaining 100ms completes the interval.
  fixLimiter.setNextRefillTime(fixLimiter.getNextRefillTime() - 100);
  assertTrue(fixLimiter.canExecute());
  assertEquals(0, fixLimiter.waitInterval());
}

InternalCallVerifier EqualityVerifier 
/**
 * AverageIntervalRateLimiter at 100/sec: the achievable rate tracks the
 * requested rate (50, 100, 200) but tops out at 200.
 */
@Test
public void testCanExecuteOfAverageIntervalRateLimiter() throws InterruptedException {
  RateLimiter avgLimiter = new AverageIntervalRateLimiter();
  int[] requestedRates = { 50, 100, 200, 500 };
  int[] expectedRates = { 50, 100, 200, 200 };
  for (int k = 0; k < requestedRates.length; k++) {
    // Reset limit and refill time before each measurement.
    avgLimiter.set(100, TimeUnit.SECONDS);
    avgLimiter.setNextRefillTime(EnvironmentEdgeManager.currentTime());
    assertEquals(expectedRates[k], testCanExecuteByRate(avgLimiter, requestedRates[k]));
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * AverageIntervalRateLimiter refill: the refilled amount is proportional
 * to the elapsed time, capped at the configured limit (60/sec here).
 */
@Test
public void testRefillOfAverageIntervalRateLimiter() throws InterruptedException {
  RateLimiter avgLimiter = new AverageIntervalRateLimiter();
  avgLimiter.set(60, TimeUnit.SECONDS);
  assertEquals(60, avgLimiter.getAvailable());
  assertEquals(60, avgLimiter.refill(avgLimiter.getLimit()));

  avgLimiter.consume(30);
  // Partial elapsed time refills proportionally: 200ms -> 12, 500ms -> 30.
  avgLimiter.setNextRefillTime(avgLimiter.getNextRefillTime() - 200);
  assertEquals(12, avgLimiter.refill(avgLimiter.getLimit()));
  avgLimiter.setNextRefillTime(avgLimiter.getNextRefillTime() - 500);
  assertEquals(30, avgLimiter.refill(avgLimiter.getLimit()));
  // A full interval or more refills up to the cap, never beyond.
  for (long back : new long[] { 1000, 3000, 5000 }) {
    avgLimiter.setNextRefillTime(avgLimiter.getNextRefillTime() - back);
    assertEquals(60, avgLimiter.refill(avgLimiter.getLimit()));
  }
}

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * FixedIntervalRateLimiter at 10/sec: rewinding the refill time by half a
 * second each round keeps admitting bursts of 3 operations, ten times over.
 */
@Test
public void testLimiterBySmallerRate() throws InterruptedException {
  RateLimiter fixLimiter = new FixedIntervalRateLimiter();
  fixLimiter.set(10, TimeUnit.SECONDS);
  for (int round = 1; round <= 10; round++) {
    // Pretend half a second passed since the last refill.
    fixLimiter.setNextRefillTime(fixLimiter.getNextRefillTime() - 500);
    for (int op = 0; op < 3; op++) {
      assertEquals(true, fixLimiter.canExecute());
      fixLimiter.consume();
    }
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * FixedIntervalRateLimiter refill: nothing is refilled until a full
 * interval has elapsed, then the full limit (60) is restored at once.
 */
@Test
public void testRefillOfFixedIntervalRateLimiter() throws InterruptedException {
  RateLimiter fixLimiter = new FixedIntervalRateLimiter();
  fixLimiter.set(60, TimeUnit.SECONDS);
  assertEquals(60, fixLimiter.getAvailable());
  assertEquals(60, fixLimiter.refill(fixLimiter.getLimit()));

  fixLimiter.consume(30);
  // Less than one full interval elapsed: no refill at all.
  fixLimiter.setNextRefillTime(fixLimiter.getNextRefillTime() - 200);
  assertEquals(0, fixLimiter.refill(fixLimiter.getLimit()));
  fixLimiter.setNextRefillTime(fixLimiter.getNextRefillTime() - 500);
  assertEquals(0, fixLimiter.refill(fixLimiter.getLimit()));
  // A full interval (or more) restores the whole limit in one shot.
  for (long back : new long[] { 1000, 3000, 5000 }) {
    fixLimiter.setNextRefillTime(fixLimiter.getNextRefillTime() - back);
    assertEquals(60, fixLimiter.refill(fixLimiter.getLimit()));
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FixedIntervalRateLimiter availability: consuming reduces the available
 * budget, over-budget requests are rejected, and a full interval restores
 * availability to the limit.
 */
@Test
public void testFixedIntervalResourceAvailability() throws Exception {
  RateLimiter fixLimiter = new FixedIntervalRateLimiter();
  fixLimiter.set(10, TimeUnit.SECONDS);

  assertTrue(fixLimiter.canExecute(10));
  fixLimiter.consume(3);
  assertEquals(7, fixLimiter.getAvailable());
  // Only 7 left: a request for 10 must be rejected.
  assertFalse(fixLimiter.canExecute(10));

  // After a full interval the budget is back to 10.
  fixLimiter.setNextRefillTime(fixLimiter.getNextRefillTime() - 1000);
  assertTrue(fixLimiter.canExecute(10));
  assertEquals(10, fixLimiter.getAvailable());
}

InternalCallVerifier EqualityVerifier 
/**
 * FixedIntervalRateLimiter at 100/sec: the achievable rate is capped at
 * the configured limit, unlike the average-interval strategy.
 */
@Test
public void testCanExecuteOfFixedIntervalRateLimiter() throws InterruptedException {
  RateLimiter fixLimiter = new FixedIntervalRateLimiter();
  int[] requestedRates = { 50, 100, 200 };
  int[] expectedRates = { 50, 100, 100 };
  for (int k = 0; k < requestedRates.length; k++) {
    // Reset limit and refill time before each measurement.
    fixLimiter.set(100, TimeUnit.SECONDS);
    fixLimiter.setNextRefillTime(EnvironmentEdgeManager.currentTime());
    assertEquals(expectedRates[k], testCanExecuteByRate(fixLimiter, requestedRates[k]));
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestAtomicOperation

IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Runs 100 threads, each performing 100 multi-family Appends against the same
 * row, and verifies that every concurrent read observes all three appended
 * columns at identical lengths (i.e. each Append is applied atomically), and
 * that the final value length of each column is numThreads * opsPerThread.
 *
 * Fixes over the previous version: the swallowed InterruptedException now
 * restores the thread's interrupt status, the final assertEquals calls pass
 * the expected value first, and the anonymous class's loop variable no longer
 * shadows the outer loop's {@code i}.
 */
@Test
public void testAppendMultiThreads() throws IOException {
  LOG.info("Starting test testAppendMultiThreads");
  initHRegion(tableName, name.getMethodName(), new int[] { 1, 3 }, fam1, fam2);
  int numThreads = 100;
  int opsPerThread = 100;
  AtomicOperation[] all = new AtomicOperation[numThreads];
  final byte[] val = new byte[] { 1 };
  AtomicInteger failures = new AtomicInteger(0);
  for (int i = 0; i < numThreads; i++) {
    all[i] = new AtomicOperation(region, opsPerThread, null, failures) {
      @Override
      public void run() {
        // Renamed from "i" to avoid shadowing the enclosing loop variable.
        for (int op = 0; op < numOps; op++) {
          try {
            Append a = new Append(row);
            a.add(fam1, qual1, val);
            a.add(fam1, qual2, val);
            a.add(fam2, qual3, val);
            a.setDurability(Durability.ASYNC_WAL);
            region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE);
            // Atomicity check: all three appended columns must always have
            // grown in lock-step, even across column families.
            Get g = new Get(row);
            Result result = region.get(g);
            assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam1, qual2).length);
            assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam2, qual3).length);
          } catch (IOException e) {
            e.printStackTrace();
            failures.incrementAndGet();
            fail();
          }
        }
      }
    };
  }
  for (int i = 0; i < numThreads; i++) {
    all[i].start();
  }
  for (int i = 0; i < numThreads; i++) {
    try {
      all[i].join();
    } catch (InterruptedException e) {
      // Previously swallowed silently: restore the interrupt status so the
      // caller (or the test runner) can still observe the interruption.
      Thread.currentThread().interrupt();
    }
  }
  assertEquals(0, failures.get());
  // Each of the 100 threads appended a single byte 100 times per column.
  Get g = new Get(row);
  Result result = region.get(g);
  assertEquals(10000, result.getValue(fam1, qual1).length);
  assertEquals(10000, result.getValue(fam1, qual2).length);
  assertEquals(10000, result.getValue(fam2, qual3).length);
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the basic append operation: a first Append with
 * {@code setReturnResults(false)} must yield a null Result, and a second
 * Append must concatenate its payload onto the already-stored values.
 * More tests in
 * @see org.apache.hadoop.hbase.client.TestFromClientSide#testAppend()
 */
@Test
public void testAppend() throws IOException {
  initHRegion(tableName, name.getMethodName(), fam1);
  String v1 = "Ultimate Answer to the Ultimate Question of Life," + " The Universe, and Everything";
  String v2 = " is... 42.";
  // First append: result reporting suppressed, so the region returns null.
  Append a = new Append(row);
  a.setReturnResults(false);
  a.add(fam1, qual1, Bytes.toBytes(v1));
  a.add(fam1, qual2, Bytes.toBytes(v2));
  assertNull(region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE));
  // Second append swaps the payloads, so each column ends up holding both
  // strings, in opposite concatenation order.
  a = new Append(row);
  a.add(fam1, qual1, Bytes.toBytes(v2));
  a.add(fam1, qual2, Bytes.toBytes(v1));
  Result result = region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE);
  assertEquals(0, Bytes.compareTo(Bytes.toBytes(v1 + v2), result.getValue(fam1, qual1)));
  assertEquals(0, Bytes.compareTo(Bytes.toBytes(v2 + v1), result.getValue(fam1, qual2)));
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Increments a column in an existing family together with a column in a
 * missing family and verifies the whole operation fails atomically with
 * NoSuchColumnFamilyException, leaving neither column written.
 */
@Test
public void testIncrementWithNonExistingFamily() throws IOException {
  initHRegion(tableName, name.getMethodName(), fam1);
  final Increment inc = new Increment(row);
  inc.addColumn(fam1, qual1, 1);
  inc.addColumn(fam2, qual2, 1);
  inc.setDurability(Durability.ASYNC_WAL);
  try {
    region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE);
    // BUG FIX: this fail() was missing, so the test passed silently when the
    // increment did NOT throw. Mirrors testAppendWithNonExistingFamily.
    fail("Increment operation should fail with NoSuchColumnFamilyException.");
  } catch (NoSuchColumnFamilyException e) {
    // Expected path: verify the increment was rolled back in its entirety.
    final Get g = new Get(row);
    final Result result = region.get(g);
    assertEquals(null, result.getValue(fam1, qual1));
    assertEquals(null, result.getValue(fam2, qual2));
  } catch (Exception e) {
    fail("Increment operation should fail with NoSuchColumnFamilyException.");
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Appends to a column in an existing family and a column in a missing family;
 * the operation must fail with NoSuchColumnFamilyException and never produce
 * a Result.
 */
@Test
public void testAppendWithNonExistingFamily() throws IOException {
  initHRegion(tableName, name.getMethodName(), fam1);
  final String v1 = "Value";
  final Append a = new Append(row);
  a.add(fam1, qual1, Bytes.toBytes(v1));
  a.add(fam2, qual2, Bytes.toBytes(v1));
  Result result = null;
  try {
    result = region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE);
    fail("Append operation should fail with NoSuchColumnFamilyException.");
  } catch (NoSuchColumnFamilyException e) {
    // The append never completed, so no Result was ever assigned.
    assertEquals(null, result);
  } catch (Exception e) {
    fail("Append operation should fail with NoSuchColumnFamilyException.");
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestClusterId

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Starts a lone region server against fresh ZK/DFS clusters before any master
// exists, then brings up a master and asserts the region server eventually
// adopts the cluster id published in the ZooKeeper cluster-id znode.
// NOTE(review): the fixed 10s sleep gives the RS time to start waiting for a
// cluster id before the master writes it — timing-sensitive; confirm intent.
@Test public void testClusterId() throws Exception { TEST_UTIL.startMiniZKCluster(); TEST_UTIL.startMiniDFSCluster(1); Configuration conf=new Configuration(TEST_UTIL.getConfiguration()); CoordinatedStateManager cp=CoordinatedStateManagerFactory.getCoordinatedStateManager(conf); rst=JVMClusterUtil.createRegionServerThread(conf,cp,HRegionServer.class,0); rst.start(); Thread.sleep(10000); TEST_UTIL.startMiniHBaseCluster(1,0); rst.waitForServerOnline(); String clusterId=ZKClusterId.readClusterIdZNode(TEST_UTIL.getZooKeeperWatcher()); assertNotNull(clusterId); assertEquals(clusterId,rst.getRegionServer().getClusterId()); }

InternalCallVerifier EqualityVerifier 
/**
 * Writes a pre-protobuf (writeUTF-encoded) cluster-id file into the HBase
 * root directory, then starts a mini cluster and asserts both servers come
 * online — i.e. the legacy cluster-id format is still understood/rewritten.
 */
@Test
public void testRewritingClusterIdToPB() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);
  TEST_UTIL.createRootDir();
  TEST_UTIL.getConfiguration().setBoolean("hbase.replication", true);
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
  Path filePath = new Path(rootDir, HConstants.CLUSTER_ID_FILE_NAME);
  // try-with-resources replaces the original explicit try/finally close.
  try (FSDataOutputStream s = fs.create(filePath)) {
    s.writeUTF(UUID.randomUUID().toString());
  }
  TEST_UTIL.startMiniHBaseCluster(1, 1);
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  assertEquals(2, master.getServerManager().getOnlineServersList().size());
}

Class: org.apache.hadoop.hbase.regionserver.TestCompactSplitThread

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Verifies CompactSplitThread pool sizing: initial pool sizes come from the
// configuration at startup, and onConfigurationChange() resizes all four
// pools (large/small compaction, split, merge) both upward and downward
// without throwing. The table has compaction disabled so pool sizes are not
// perturbed by background activity; cluster is torn down in the finally.
@Test public void testThreadPoolSizeTuning() throws Exception { Configuration conf=TEST_UTIL.getConfiguration(); conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS,3); conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS,4); conf.setInt(CompactSplitThread.SPLIT_THREADS,5); conf.setInt(CompactSplitThread.MERGE_THREADS,6); TEST_UTIL.startMiniCluster(1); Connection conn=ConnectionFactory.createConnection(conf); try { HTableDescriptor htd=new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(family)); htd.setCompactionEnabled(false); TEST_UTIL.getHBaseAdmin().createTable(htd); TEST_UTIL.waitTableAvailable(tableName); HRegionServer regionServer=TEST_UTIL.getRSForFirstRegionInTable(tableName); assertEquals(3,regionServer.compactSplitThread.getLargeCompactionThreadNum()); assertEquals(4,regionServer.compactSplitThread.getSmallCompactionThreadNum()); assertEquals(5,regionServer.compactSplitThread.getSplitThreadNum()); assertEquals(6,regionServer.compactSplitThread.getMergeThreadNum()); conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS,4); conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS,5); conf.setInt(CompactSplitThread.SPLIT_THREADS,6); conf.setInt(CompactSplitThread.MERGE_THREADS,7); try { regionServer.compactSplitThread.onConfigurationChange(conf); } catch ( IllegalArgumentException iae) { Assert.fail("Update bigger configuration failed!"); } assertEquals(4,regionServer.compactSplitThread.getLargeCompactionThreadNum()); assertEquals(5,regionServer.compactSplitThread.getSmallCompactionThreadNum()); assertEquals(6,regionServer.compactSplitThread.getSplitThreadNum()); assertEquals(7,regionServer.compactSplitThread.getMergeThreadNum()); conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS,2); conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS,3); conf.setInt(CompactSplitThread.SPLIT_THREADS,4); conf.setInt(CompactSplitThread.MERGE_THREADS,5); try { regionServer.compactSplitThread.onConfigurationChange(conf); } catch ( 
IllegalArgumentException iae) { Assert.fail("Update smaller configuration failed!"); } assertEquals(2,regionServer.compactSplitThread.getLargeCompactionThreadNum()); assertEquals(3,regionServer.compactSplitThread.getSmallCompactionThreadNum()); assertEquals(4,regionServer.compactSplitThread.getSplitThreadNum()); assertEquals(5,regionServer.compactSplitThread.getMergeThreadNum()); } finally { conn.close(); TEST_UTIL.shutdownMiniCluster(); } }

Class: org.apache.hadoop.hbase.regionserver.TestCompaction

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Spies on the region so that doRegionCompactionPrep() disables writes just
// before compaction runs, causing the compaction to abort mid-flight; the
// test then asserts no store files were merged and no temp files were left
// behind. The finally block restores write state, deletes the loaded rows,
// shrinks TTL to expire everything, and major-compacts back to zero rows.
// NOTE(review): relies on HStore.closeCheckInterval being a mutable static —
// restored in the finally, but not safe for parallel test execution.
/** * Verify that you can stop a long-running compaction * (used during RS shutdown) * @throws Exception */ @Test public void testInterruptCompaction() throws Exception { assertEquals(0,count()); int origWI=HStore.closeCheckInterval; HStore.closeCheckInterval=10 * 1000; try { int jmax=(int)Math.ceil(15.0 / compactionThreshold); byte[] pad=new byte[1000]; for (int i=0; i < compactionThreshold; i++) { Table loader=new RegionAsTable(r); Put p=new Put(Bytes.add(STARTROW,Bytes.toBytes(i))); p.setDurability(Durability.SKIP_WAL); for (int j=0; j < jmax; j++) { p.addColumn(COLUMN_FAMILY,Bytes.toBytes(j),pad); } HBaseTestCase.addContent(loader,Bytes.toString(COLUMN_FAMILY)); loader.put(p); r.flush(true); } HRegion spyR=spy(r); doAnswer(new Answer(){ public Object answer( InvocationOnMock invocation) throws Throwable { r.writestate.writesEnabled=false; return invocation.callRealMethod(); } } ).when(spyR).doRegionCompactionPrep(); spyR.compactStores(); Store s=r.stores.get(COLUMN_FAMILY); assertEquals(compactionThreshold,s.getStorefilesCount()); assertTrue(s.getStorefilesSize() > 15 * 1000); FileStatus[] ls=r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir()); assertEquals(0,ls.length); } finally { r.writestate.writesEnabled=true; HStore.closeCheckInterval=origWI; for (int i=0; i < compactionThreshold; i++) { Delete delete=new Delete(Bytes.add(STARTROW,Bytes.toBytes(i))); byte[][] famAndQf={COLUMN_FAMILY,null}; delete.addFamily(famAndQf[0]); r.delete(delete); } r.flush(true); final int ttl=1000; for ( Store hstore : this.r.stores.values()) { HStore store=(HStore)hstore; ScanInfo old=store.getScanInfo(); ScanInfo si=new ScanInfo(old.getConfiguration(),old.getFamily(),old.getMinVersions(),old.getMaxVersions(),ttl,old.getKeepDeletedCells(),0,old.getComparator()); store.setScanInfo(si); } Thread.sleep(ttl); r.compact(true); assertEquals(0,count()); } }

Class: org.apache.hadoop.hbase.regionserver.TestCompoundBloomFilter

TestInitializer InternalCallVerifier NullVerifier HybridVerifier 
/**
 * Per-test setup: forces the newest HFile format (required for compound
 * Bloom filters), grabs the shared filesystem and cache configuration, and
 * sanity-checks that a block cache was actually instantiated.
 */
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  // Compound Bloom filters only exist in the most recent HFile format.
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  fs = FileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
  assertNotNull(blockCache);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Verifies that a "first on row" key built with an empty qualifier and one
 * built with a real qualifier agree on timestamp and row bytes, and that the
 * row-only key carries no qualifier.
 */
@Test
public void testCreateKey() {
  // Use Bytes.toBytes instead of String.getBytes(): the latter depends on the
  // platform default charset, which is a latent portability bug; Bytes is
  // already the file's convention for string-to-byte conversion.
  byte[] row = Bytes.toBytes("myRow");
  byte[] qualifier = Bytes.toBytes("myQualifier");
  byte[] rowKey = KeyValueUtil.createFirstOnRow(row, 0, row.length, new byte[0], 0, 0, row, 0, 0).getKey();
  byte[] rowColKey = KeyValueUtil.createFirstOnRow(row, 0, row.length, new byte[0], 0, 0, qualifier, 0, qualifier.length).getKey();
  KeyValue rowKV = KeyValueUtil.createKeyValueFromKey(rowKey);
  KeyValue rowColKV = KeyValueUtil.createKeyValueFromKey(rowColKey);
  // Both first-on-row keys must share timestamp and row.
  assertEquals(rowKV.getTimestamp(), rowColKV.getTimestamp());
  assertEquals(
    Bytes.toStringBinary(rowKV.getRowArray(), rowKV.getRowOffset(), rowKV.getRowLength()),
    Bytes.toStringBinary(rowColKV.getRowArray(), rowColKV.getRowOffset(), rowColKV.getRowLength()));
  // The key built with a zero-length qualifier must report none.
  assertEquals(0, rowKV.getQualifierLength());
}

Class: org.apache.hadoop.hbase.regionserver.TestDefaultStoreEngine

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Configures custom compactor, compaction-policy and flusher classes and
 * asserts that StoreEngine.create wires up a DefaultStoreEngine carrying
 * exactly those dummy components.
 */
@Test
public void testCustomParts() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.set(DefaultStoreEngine.DEFAULT_COMPACTOR_CLASS_KEY, DummyCompactor.class.getName());
  conf.set(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, DummyCompactionPolicy.class.getName());
  conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, DummyStoreFlusher.class.getName());
  Store mockStore = Mockito.mock(Store.class);
  StoreEngine se = StoreEngine.create(mockStore, conf, CellComparator.COMPARATOR);
  // Every pluggable part must resolve to the dummy implementation set above.
  Assert.assertTrue(se instanceof DefaultStoreEngine);
  Assert.assertTrue(se.getCompactionPolicy() instanceof DummyCompactionPolicy);
  Assert.assertTrue(se.getStoreFlusher() instanceof DummyStoreFlusher);
  Assert.assertTrue(se.getCompactor() instanceof DummyCompactor);
}

Class: org.apache.hadoop.hbase.regionserver.TestDeleteMobTable

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Creates a non-MOB table with one file and asserts that no mob files, no
// archived mob files, and no mob table directory ever exist — neither while
// the table is live nor after it is deleted.
@Test public void testDeleteNonMobTable() throws Exception { TableName tn=TableName.valueOf("testDeleteNonMobTable"); HTableDescriptor htd=createTableDescriptor(tn,false); HColumnDescriptor hcd=htd.getFamily(FAMILY); Table table=createTableWithOneFile(htd); try { Assert.assertEquals(0,countMobFiles(tn,hcd.getNameAsString())); Assert.assertEquals(0,countArchiveMobFiles(tn,hcd.getNameAsString())); Assert.assertFalse(mobTableDirExist(tn)); } finally { table.close(); TEST_UTIL.deleteTable(tn); } Assert.assertFalse(TEST_UTIL.getHBaseAdmin().tableExists(tn)); Assert.assertEquals(0,countMobFiles(tn,hcd.getNameAsString())); Assert.assertEquals(0,countArchiveMobFiles(tn,hcd.getNameAsString())); Assert.assertFalse(mobTableDirExist(tn)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Deleting a MOB-enabled column family must move its mob file into the
// archive (count shifts from live 1/archive 0 to live 0/archive 1) and
// remove the mob column-family directory, while the second (non-MOB) family
// keeps the table itself alive.
@Test public void testMobFamilyDelete() throws Exception { TableName tn=TableName.valueOf("testMobFamilyDelete"); HTableDescriptor htd=createTableDescriptor(tn,true); HColumnDescriptor hcd=htd.getFamily(FAMILY); htd.addFamily(new HColumnDescriptor(Bytes.toBytes("family2"))); Table table=createTableWithOneFile(htd); try { Assert.assertEquals(1,countMobFiles(tn,hcd.getNameAsString())); Assert.assertEquals(0,countArchiveMobFiles(tn,hcd.getNameAsString())); String fileName=assertHasOneMobRow(table,tn,hcd.getNameAsString()); Assert.assertFalse(mobArchiveExist(tn,hcd.getNameAsString(),fileName)); Assert.assertTrue(mobTableDirExist(tn)); TEST_UTIL.getHBaseAdmin().deleteColumnFamily(tn,FAMILY); Assert.assertEquals(0,countMobFiles(tn,hcd.getNameAsString())); Assert.assertEquals(1,countArchiveMobFiles(tn,hcd.getNameAsString())); Assert.assertTrue(mobArchiveExist(tn,hcd.getNameAsString(),fileName)); Assert.assertFalse(mobColumnFamilyDirExist(tn,hcd.getNameAsString())); } finally { table.close(); TEST_UTIL.deleteTable(tn); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Deleting a whole MOB table must archive its mob file: before deletion the
// file is live (1 live / 0 archived, not in archive); after deletion the
// counts invert (0 live / 1 archived), the file is found in the archive, and
// the mob table directory is gone.
@Test public void testDeleteMobTable() throws Exception { TableName tn=TableName.valueOf("testDeleteMobTable"); HTableDescriptor htd=createTableDescriptor(tn,true); HColumnDescriptor hcd=htd.getFamily(FAMILY); String fileName=null; Table table=createTableWithOneFile(htd); try { Assert.assertEquals(1,countMobFiles(tn,hcd.getNameAsString())); Assert.assertEquals(0,countArchiveMobFiles(tn,hcd.getNameAsString())); fileName=assertHasOneMobRow(table,tn,hcd.getNameAsString()); Assert.assertFalse(mobArchiveExist(tn,hcd.getNameAsString(),fileName)); Assert.assertTrue(mobTableDirExist(tn)); } finally { table.close(); TEST_UTIL.deleteTable(tn); } Assert.assertFalse(TEST_UTIL.getHBaseAdmin().tableExists(tn)); Assert.assertEquals(0,countMobFiles(tn,hcd.getNameAsString())); Assert.assertEquals(1,countArchiveMobFiles(tn,hcd.getNameAsString())); Assert.assertTrue(mobArchiveExist(tn,hcd.getNameAsString(),fileName)); Assert.assertFalse(mobTableDirExist(tn)); }

Class: org.apache.hadoop.hbase.regionserver.TestEncryptionKeyRotation

InternalCallVerifier BooleanVerifier 
// Rotates the column-family encryption key: writes files under the initial
// key, swaps in a second wrapped key via modifyColumnFamily, major-compacts,
// waits for the pre-rotation files to be moved aside, and then asserts that
// post-compaction store files are encrypted with the NEW key while the
// retained compacted files still carry the OLD key.
// NOTE(review): the bare Thread.sleep(5000)/sleep(1000) waits make this
// timing-sensitive; the waitFor predicate only partially covers the race.
@Test public void testCFKeyRotation() throws Exception { HTableDescriptor htd=new HTableDescriptor(TableName.valueOf("default","testCFKeyRotation")); HColumnDescriptor hcd=new HColumnDescriptor("cf"); String algorithm=conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY,HConstants.CIPHER_AES); hcd.setEncryptionType(algorithm); hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf,"hbase",initialCFKey)); htd.addFamily(hcd); createTableAndFlush(htd); final List initialPaths=findStorefilePaths(htd.getTableName()); assertTrue(initialPaths.size() > 0); for ( Path path : initialPaths) { assertTrue("Store file " + path + " has incorrect key",Bytes.equals(initialCFKey.getEncoded(),extractHFileKey(path))); } hcd=htd.getFamily(Bytes.toBytes("cf")); hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf,conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY,User.getCurrent().getShortName()),secondCFKey)); TEST_UTIL.getHBaseAdmin().modifyColumnFamily(htd.getTableName(),hcd); Thread.sleep(5000); TEST_UTIL.getHBaseAdmin().majorCompact(htd.getTableName()); final List updatePaths=findCompactedStorefilePaths(htd.getTableName()); TEST_UTIL.waitFor(30000,1000,true,new Predicate(){ @Override public boolean evaluate() throws Exception { boolean found=false; for ( Path path : updatePaths) { found=TEST_UTIL.getTestFileSystem().exists(path); if (found) { LOG.info("Found " + path); break; } } return !found; } } ); Thread.sleep(1000); waitForCompaction(htd.getTableName()); List pathsAfterCompaction=findStorefilePaths(htd.getTableName()); assertTrue(pathsAfterCompaction.size() > 0); for ( Path path : pathsAfterCompaction) { assertTrue("Store file " + path + " has incorrect key",Bytes.equals(secondCFKey.getEncoded(),extractHFileKey(path))); } List compactedPaths=findCompactedStorefilePaths(htd.getTableName()); assertTrue(compactedPaths.size() > 0); for ( Path path : compactedPaths) { assertTrue("Store file " + path + " retains initial key",Bytes.equals(initialCFKey.getEncoded(),extractHFileKey(path))); } }

InternalCallVerifier BooleanVerifier 
// Rotates the cluster MASTER key (not the CF key): restarts the mini cluster
// with a new master-key name and the old one configured as the alternate,
// then asserts the existing store files — wrapped under the old master key —
// are still readable and still carry the original CF key.
@Test public void testMasterKeyRotation() throws Exception { HTableDescriptor htd=new HTableDescriptor(TableName.valueOf("default","testMasterKeyRotation")); HColumnDescriptor hcd=new HColumnDescriptor("cf"); String algorithm=conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY,HConstants.CIPHER_AES); hcd.setEncryptionType(algorithm); hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf,"hbase",initialCFKey)); htd.addFamily(hcd); createTableAndFlush(htd); List storeFilePaths=findStorefilePaths(htd.getTableName()); assertTrue(storeFilePaths.size() > 0); for ( Path path : storeFilePaths) { assertTrue("Store file " + path + " has incorrect key",Bytes.equals(initialCFKey.getEncoded(),extractHFileKey(path))); } TEST_UTIL.shutdownMiniHBaseCluster(); conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY,"other"); conf.set(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY,"hbase"); TEST_UTIL.startMiniHBaseCluster(1,1); TEST_UTIL.waitTableAvailable(htd.getName(),5000); storeFilePaths=findStorefilePaths(htd.getTableName()); assertTrue(storeFilePaths.size() > 0); for ( Path path : storeFilePaths) { assertTrue("Store file " + path + " has incorrect key",Bytes.equals(initialCFKey.getEncoded(),extractHFileKey(path))); } }

Class: org.apache.hadoop.hbase.regionserver.TestEncryptionRandomKeying

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * With random keying enabled, every store file of the test table must be
 * encrypted, i.e. expose a non-null HFile encryption key.
 */
@Test
public void testRandomKeying() throws Exception {
  final List initialPaths = findStorefilePaths(htd.getTableName());
  // At least one store file must exist for the check to be meaningful.
  assertTrue(initialPaths.size() > 0);
  for (Path path : initialPaths) {
    assertNotNull("Store file " + path + " is not encrypted", extractHFileKey(path));
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestEndToEndSplitTransaction

InternalCallVerifier BooleanVerifier 
// Walks a region split step by step (prepare -> createDaughters ->
// openDaughters -> report transition -> add daughters online) and asserts at
// each stage which halves of the key space are reachable via test(): nothing
// until daughters are online, then only the second daughter's range, then
// both once the first daughter is also registered.
@Test public void testMasterOpsWhileSplitting() throws Exception { TableName tableName=TableName.valueOf("TestSplit"); byte[] familyName=Bytes.toBytes("fam"); try (Table ht=TEST_UTIL.createTable(tableName,familyName)){ TEST_UTIL.loadTable(ht,familyName,false); } HRegionServer server=TEST_UTIL.getHBaseCluster().getRegionServer(0); byte[] firstRow=Bytes.toBytes("aaa"); byte[] splitRow=Bytes.toBytes("lll"); byte[] lastRow=Bytes.toBytes("zzz"); try (Connection conn=ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())){ byte[] regionName=conn.getRegionLocator(tableName).getRegionLocation(splitRow).getRegionInfo().getRegionName(); Region region=server.getRegion(regionName); SplitTransactionImpl split=new SplitTransactionImpl((HRegion)region,splitRow); split.prepare(); PairOfSameType regions=split.createDaughters(server,server,null); assertFalse(test(conn,tableName,firstRow,server)); assertFalse(test(conn,tableName,lastRow,server)); split.openDaughters(server,null,regions.getFirst(),regions.getSecond()); assertFalse(test(conn,tableName,firstRow,server)); assertFalse(test(conn,tableName,lastRow,server)); server.reportRegionStateTransition(RegionServerStatusProtos.RegionStateTransition.TransitionCode.SPLIT,region.getRegionInfo(),regions.getFirst().getRegionInfo(),regions.getSecond().getRegionInfo()); server.addToOnlineRegions(regions.getSecond()); assertFalse(test(conn,tableName,firstRow,server)); assertTrue(test(conn,tableName,lastRow,server)); server.addToOnlineRegions(regions.getFirst()); assertTrue(test(conn,tableName,firstRow,server)); assertTrue(test(conn,tableName,lastRow,server)); assertTrue(test(conn,tableName,firstRow,server)); assertTrue(test(conn,tableName,lastRow,server)); } }

Class: org.apache.hadoop.hbase.regionserver.TestFSErrorsExposed

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier PublicFieldVerifier HybridVerifier 
// Wraps the backing filesystem in a FaultyFileSystem, writes a store file,
// performs one successful seek/next, then turns on faults and asserts the
// subsequent HFileScanner iteration surfaces an IOException whose message
// mentions "Fault" (errors must bubble up, not be swallowed).
/** * Injects errors into the pread calls of an on-disk file, and makes * sure those bubble up to the HFile scanner */ @Test public void testHFileScannerThrowsErrors() throws IOException { Path hfilePath=new Path(new Path(util.getDataTestDir("internalScannerExposesErrors"),"regionname"),"familyname"); HFileSystem hfs=(HFileSystem)util.getTestFileSystem(); FaultyFileSystem faultyfs=new FaultyFileSystem(hfs.getBackingFs()); FileSystem fs=new HFileSystem(faultyfs); CacheConfig cacheConf=new CacheConfig(util.getConfiguration()); HFileContext meta=new HFileContextBuilder().withBlockSize(2 * 1024).build(); StoreFile.Writer writer=new StoreFile.WriterBuilder(util.getConfiguration(),cacheConf,hfs).withOutputDir(hfilePath).withFileContext(meta).build(); TestStoreFile.writeStoreFile(writer,Bytes.toBytes("cf"),Bytes.toBytes("qual")); StoreFile sf=new StoreFile(fs,writer.getPath(),util.getConfiguration(),cacheConf,BloomType.NONE); StoreFile.Reader reader=sf.createReader(); HFileScanner scanner=reader.getScanner(false,true); FaultyInputStream inStream=faultyfs.inStreams.get(0).get(); assertNotNull(inStream); scanner.seekTo(); assertTrue(scanner.next()); faultyfs.startFaults(); try { int scanned=0; while (scanner.next()) { scanned++; } fail("Scanner didn't throw after faults injected"); } catch ( IOException ioe) { LOG.info("Got expected exception",ioe); assertTrue(ioe.getMessage().contains("Fault")); } reader.close(true); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier PublicFieldVerifier HybridVerifier 
// Same fault-injection setup as testHFileScannerThrowsErrors, but drives the
// higher-level StoreFileScanner (KeyValueScanner) instead of the raw HFile
// scanner; after faults start, iteration must fail with an IOException whose
// message contains "Could not iterate".
/** * Injects errors into the pread calls of an on-disk file, and makes * sure those bubble up to the StoreFileScanner */ @Test public void testStoreFileScannerThrowsErrors() throws IOException { Path hfilePath=new Path(new Path(util.getDataTestDir("internalScannerExposesErrors"),"regionname"),"familyname"); HFileSystem hfs=(HFileSystem)util.getTestFileSystem(); FaultyFileSystem faultyfs=new FaultyFileSystem(hfs.getBackingFs()); HFileSystem fs=new HFileSystem(faultyfs); CacheConfig cacheConf=new CacheConfig(util.getConfiguration()); HFileContext meta=new HFileContextBuilder().withBlockSize(2 * 1024).build(); StoreFile.Writer writer=new StoreFile.WriterBuilder(util.getConfiguration(),cacheConf,hfs).withOutputDir(hfilePath).withFileContext(meta).build(); TestStoreFile.writeStoreFile(writer,Bytes.toBytes("cf"),Bytes.toBytes("qual")); StoreFile sf=new StoreFile(fs,writer.getPath(),util.getConfiguration(),cacheConf,BloomType.NONE); List scanners=StoreFileScanner.getScannersForStoreFiles(Collections.singletonList(sf),false,true,false,false,0); KeyValueScanner scanner=scanners.get(0); FaultyInputStream inStream=faultyfs.inStreams.get(0).get(); assertNotNull(inStream); scanner.seek(KeyValue.LOWESTKEY); assertNotNull(scanner.next()); faultyfs.startFaults(); try { int scanned=0; while (scanner.next() != null) { scanned++; } fail("Scanner didn't throw after faults injected"); } catch ( IOException ioe) { LOG.info("Got expected exception",ioe); assertTrue(ioe.getMessage().contains("Could not iterate")); } scanner.close(); }

Class: org.apache.hadoop.hbase.regionserver.TestFlushRegionEntry

InternalCallVerifier EqualityVerifier 
/**
 * Two FlushRegionEntry instances built from the same region must be equal
 * and share a hash code, per the equals/hashCode contract.
 */
@Test
public void testFlushRegionEntryEquality() {
  HRegionInfo hri = new HRegionInfo(1, TableName.valueOf("TestTable"), 0);
  HRegion r = mock(HRegion.class);
  doReturn(hri).when(r).getRegionInfo();
  FlushRegionEntry first = new FlushRegionEntry(r, true);
  FlushRegionEntry second = new FlushRegionEntry(r, true);
  // Equal objects are required to produce equal hash codes.
  assertEquals(first.hashCode(), second.hashCode());
  assertEquals(first, second);
}

Class: org.apache.hadoop.hbase.regionserver.TestGetClosestAtOrBefore

InternalCallVerifier BooleanVerifier PublicFieldVerifier 
// Long put/delete/flush choreography verifying getClosestRowBefore across
// memstore and store files when intervening rows are deleted: after each
// delete or flush, the closest-at-or-before lookup must skip the deleted
// rows (T20/T30) and land on the surviving T10/T11. The finally block closes
// the region and its WAL regardless of assertion outcome.
/** * Test file of multiple deletes and with deletes as final key. * @see HBASE-751 */ @Test public void testGetClosestRowBefore3() throws IOException { Region region=null; byte[] c0=UTIL.COLUMNS[0]; byte[] c1=UTIL.COLUMNS[1]; try { TableName tn=TableName.valueOf(testName.getMethodName()); HTableDescriptor htd=UTIL.createTableDescriptor(tn); region=UTIL.createLocalHRegion(htd,null,null); Put p=new Put(T00); p.addColumn(c0,c0,T00); region.put(p); p=new Put(T10); p.addColumn(c0,c0,T10); region.put(p); p=new Put(T20); p.addColumn(c0,c0,T20); region.put(p); Result r=UTIL.getClosestRowBefore(region,T20,c0); assertTrue(Bytes.equals(T20,r.getRow())); Delete d=new Delete(T20); d.addColumn(c0,c0); region.delete(d); r=UTIL.getClosestRowBefore(region,T20,c0); assertTrue(Bytes.equals(T10,r.getRow())); p=new Put(T30); p.addColumn(c0,c0,T30); region.put(p); r=UTIL.getClosestRowBefore(region,T30,c0); assertTrue(Bytes.equals(T30,r.getRow())); d=new Delete(T30); d.addColumn(c0,c0); region.delete(d); r=UTIL.getClosestRowBefore(region,T30,c0); assertTrue(Bytes.equals(T10,r.getRow())); r=UTIL.getClosestRowBefore(region,T31,c0); assertTrue(Bytes.equals(T10,r.getRow())); region.flush(true); r=UTIL.getClosestRowBefore(region,T30,c0); assertTrue(Bytes.equals(T10,r.getRow())); r=UTIL.getClosestRowBefore(region,T31,c0); assertTrue(Bytes.equals(T10,r.getRow())); p=new Put(T20); p.addColumn(c1,c1,T20); region.put(p); r=UTIL.getClosestRowBefore(region,T30,c0); assertTrue(Bytes.equals(T10,r.getRow())); r=UTIL.getClosestRowBefore(region,T31,c0); assertTrue(Bytes.equals(T10,r.getRow())); region.flush(true); r=UTIL.getClosestRowBefore(region,T30,c0); assertTrue(Bytes.equals(T10,r.getRow())); r=UTIL.getClosestRowBefore(region,T31,c0); assertTrue(Bytes.equals(T10,r.getRow())); d=new Delete(T20); d.addColumn(c1,c1); region.delete(d); r=UTIL.getClosestRowBefore(region,T30,c0); assertTrue(Bytes.equals(T10,r.getRow())); r=UTIL.getClosestRowBefore(region,T31,c0); assertTrue(Bytes.equals(T10,r.getRow())); 
region.flush(true); r=UTIL.getClosestRowBefore(region,T31,c0); assertTrue(Bytes.equals(T10,r.getRow())); p=new Put(T11); p.addColumn(c0,c0,T11); region.put(p); d=new Delete(T10); d.addColumn(c1,c1); r=UTIL.getClosestRowBefore(region,T12,c0); assertTrue(Bytes.equals(T11,r.getRow())); } finally { if (region != null) { try { WAL wal=((HRegion)region).getWAL(); ((HRegion)region).close(); wal.close(); } catch ( Exception e) { e.printStackTrace(); } } } }

InternalCallVerifier BooleanVerifier PublicFieldVerifier 
// HBASE-694 regression: getClosestRowBefore(T35) must keep returning T30
// both while data is only in the memstore and after flushes create store
// files, including when a later put (T20) lands in a fresh memstore. The
// finally block closes the region and its WAL.
/** * For HBASE-694 */ @Test public void testGetClosestRowBefore2() throws IOException { Region region=null; byte[] c0=UTIL.COLUMNS[0]; try { TableName tn=TableName.valueOf(testName.getMethodName()); HTableDescriptor htd=UTIL.createTableDescriptor(tn); region=UTIL.createLocalHRegion(htd,null,null); Put p=new Put(T10); p.addColumn(c0,c0,T10); region.put(p); p=new Put(T30); p.addColumn(c0,c0,T30); region.put(p); p=new Put(T40); p.addColumn(c0,c0,T40); region.put(p); Result r=UTIL.getClosestRowBefore(region,T35,c0); assertTrue(Bytes.equals(T30,r.getRow())); region.flush(true); r=UTIL.getClosestRowBefore(region,T35,c0); assertTrue(Bytes.equals(T30,r.getRow())); p=new Put(T20); p.addColumn(c0,c0,T20); region.put(p); r=UTIL.getClosestRowBefore(region,T35,c0); assertTrue(Bytes.equals(T30,r.getRow())); region.flush(true); r=UTIL.getClosestRowBefore(region,T35,c0); assertTrue(Bytes.equals(T30,r.getRow())); } finally { if (region != null) { try { WAL wal=((HRegion)region).getWAL(); ((HRegion)region).close(); wal.close(); } catch ( Exception e) { e.printStackTrace(); } } } }

Class: org.apache.hadoop.hbase.regionserver.TestHMobStore

InternalCallVerifier EqualityVerifier 
/**
 * Commits the mob file into the dated mob directory, then resolves three
 * seek keys against the store and checks that each cell comes back with the
 * value originally written for it.
 */
@Test
public void testResolve() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  init(name.getMethodName(), conf, true);
  String targetPathName = MobUtils.formatDate(currentDate);
  Path targetPath = new Path(store.getPath(), targetPathName);
  store.commitFile(mobFilePath, targetPath);
  // The first two keys map to `value`; the third maps to `value2`.
  Cell resultCell1 = store.resolve(seekKey1, false);
  Cell resultCell2 = store.resolve(seekKey2, false);
  Cell resultCell3 = store.resolve(seekKey3, false);
  Assert.assertEquals(Bytes.toString(value), Bytes.toString(CellUtil.cloneValue(resultCell1)));
  Assert.assertEquals(Bytes.toString(value), Bytes.toString(CellUtil.cloneValue(resultCell2)));
  Assert.assertEquals(Bytes.toString(value2), Bytes.toString(CellUtil.cloneValue(resultCell3)));
}

InternalCallVerifier BooleanVerifier 
/**
 * commitFile must move the mob file to the requested target path: the target
 * is absent beforehand and present afterwards.
 */
@Test
public void testCommitFile() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  init(name.getMethodName(), conf, true);
  String targetPathName = MobUtils.formatDate(new Date());
  Path targetPath = new Path(store.getPath(), (targetPathName + Path.SEPARATOR + mobFilePath.getName()));
  // Start from a clean slate so the existence check below is meaningful.
  fs.delete(targetPath, true);
  Assert.assertFalse(fs.exists(targetPath));
  store.commitFile(mobFilePath, targetPath);
  Assert.assertTrue(fs.exists(targetPath));
}

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * End-to-end encryption for a MOB column family: flushed store files must be
 * encrypted, reads must return the expected cells, and the single file left
 * by a major compaction must remain encrypted.
 */
@Test
public void testMOBStoreEncryption() throws Exception {
  final Configuration conf = TEST_UTIL.getConfiguration();
  // Wire up a test key provider and generate a random per-family AES key.
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
  SecureRandom rng = new SecureRandom();
  byte[] keyBytes = new byte[AES.KEY_LENGTH];
  rng.nextBytes(keyBytes);
  String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  Key cfKey = new SecretKeySpec(keyBytes, algorithm);
  // MOB-enabled, encrypted family; the cf key is wrapped with the master key.
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setMobEnabled(true);
  hcd.setMobThreshold(100);
  hcd.setMaxVersions(4);
  hcd.setEncryptionType(algorithm);
  hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf,
      conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()),
      cfKey));
  init(name.getMethodName(), conf, hcd, false);
  // Two flushes -> two store files, both of which must be encrypted.
  this.store.add(new KeyValue(row, family, qf1, 1, value));
  this.store.add(new KeyValue(row, family, qf2, 1, value));
  this.store.add(new KeyValue(row, family, qf3, 1, value));
  flush(1);
  this.store.add(new KeyValue(row, family, qf4, 1, value));
  this.store.add(new KeyValue(row, family, qf5, 1, value));
  this.store.add(new KeyValue(row, family, qf6, 1, value));
  flush(2);
  Collection storefiles = this.store.getStorefiles();
  checkMobHFileEncrytption(storefiles);
  // Scan everything back and compare against the expected cells.
  Scan scan = new Scan(get);
  InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getFamily().getName()), 0);
  List results = new ArrayList();
  scanner.next(results);
  Collections.sort(results, KeyValue.COMPARATOR);
  scanner.close();
  Assert.assertEquals(expected.size(), results.size());
  for (int i = 0; i < results.size(); i++) {
    Assert.assertEquals(expected.get(i), results.get(i));
  }
  // Major-compact down to a single file; it must still be encrypted.
  this.store.triggerMajorCompaction();
  CompactionContext requestCompaction = this.store.requestCompaction(1, null);
  this.store.compact(requestCompaction, NoLimitThroughputController.INSTANCE);
  Assert.assertEquals(1, this.store.getStorefiles().size());
  checkMobHFileEncrytption(this.store.getStorefiles());
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Getting data from memstore and files: a raw mob scan (MOB_SCAN_RAW) over
 * three flushed store files must return the stored cells themselves, none of
 * which may be a mob reference cell, and the configured mob threshold must be
 * retained on the column family.
 * <p>
 * Fixed: the result list had lost its element type ({@code List results}),
 * which breaks compilation of the {@code Cell cell = results.get(i)}
 * assignment; restored {@code List<Cell>}.
 * @throws IOException on region/store access failure
 */
@Test
public void testMobCellSizeThreshold() throws IOException {
  final Configuration conf = HBaseConfiguration.create();
  HColumnDescriptor hcd;
  hcd = new HColumnDescriptor(family);
  hcd.setMobEnabled(true);
  hcd.setMobThreshold(100);
  hcd.setMaxVersions(4);
  init(name.getMethodName(), conf, hcd, false);
  // Three flushes -> three store files, two cells each.
  this.store.add(new KeyValue(row, family, qf1, 1, value));
  this.store.add(new KeyValue(row, family, qf2, 1, value));
  flush(1);
  this.store.add(new KeyValue(row, family, qf3, 1, value));
  this.store.add(new KeyValue(row, family, qf4, 1, value));
  flush(2);
  this.store.add(new KeyValue(row, family, qf5, 1, value));
  this.store.add(new KeyValue(row, family, qf6, 1, value));
  flush(3);
  // Raw scan: read stored cells directly instead of resolving mob references.
  Scan scan = new Scan(get);
  scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
  InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getFamily().getName()), 0);
  List<Cell> results = new ArrayList<>();
  scanner.next(results);
  Collections.sort(results, KeyValue.COMPARATOR);
  scanner.close();
  Assert.assertEquals(expected.size(), results.size());
  for (int i = 0; i < results.size(); i++) {
    Cell cell = results.get(i);
    // NOTE(review): assumes the fixture values stay under the mob threshold,
    // so no raw cell is a mob reference — confirm against the `value` fixture.
    Assert.assertFalse(MobUtils.isMobReferenceCell(cell));
    Assert.assertEquals(expected.get(i), results.get(i));
    Assert.assertEquals(100, store.getFamily().getMobThreshold());
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestHRegion

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Testcase to check state of region initialization task set to ABORTED or not
 * if any exceptions during initialization.
 * <p>
 * A mocked FileSystem throws IOException from exists(), which must make
 * HRegion.initialize() fail; the monitored task created for this region must
 * then be in state ABORTED.
 * <p>
 * Fixed: the task list had lost its element type ({@code List tasks}), which
 * breaks compilation of the enhanced for loop over {@code MonitoredTask};
 * restored {@code List<MonitoredTask>}.
 * @throws Exception on unexpected test failure
 */
@Test
public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() throws Exception {
  TableName tableName = TableName.valueOf(name.getMethodName());
  HRegionInfo info = null;
  try {
    // exists() always throws, so region initialization cannot proceed.
    FileSystem fs = Mockito.mock(FileSystem.class);
    Mockito.when(fs.exists((Path) Mockito.anyObject())).thenThrow(new IOException());
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("cf"));
    info = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY,
        HConstants.EMPTY_BYTE_ARRAY, false);
    Path path = new Path(dir + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization");
    region = HRegion.newHRegion(path, null, fs, CONF, info, htd, null);
    region.initialize();
    fail("Region initialization should fail due to IOException");
  } catch (IOException io) {
    // Locate the monitored task for this region and assert it was aborted.
    List<MonitoredTask> tasks = TaskMonitor.get().getTasks();
    for (MonitoredTask monitoredTask : tasks) {
      if (!(monitoredTask instanceof MonitoredRPCHandler)
          && monitoredTask.getDescription().contains(region.toString())) {
        assertTrue("Region state should be ABORTED.",
            monitoredTask.getState().equals(MonitoredTask.State.ABORTED));
        break;
      }
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Splits a region at a known row and verifies that each daughter serves
 * exactly the rows written on its side of the split point.
 */
@Test
public void testSplitRegion() throws IOException {
  byte[] qualifier = Bytes.toBytes("qualifier");
  Configuration conf = initSplit();
  int rowsPerHalf = 10;
  byte[][] families = { fam1, fam3 };
  String method = this.getName();
  this.region = initHRegion(tableName, method, conf, families);
  // Write one batch of rows below the split point and one batch above it.
  int firstRow = 100;
  putData(firstRow, rowsPerHalf, qualifier, families);
  int splitPoint = firstRow + rowsPerHalf;
  putData(splitPoint, rowsPerHalf, qualifier, families);
  region.flush(true);
  HRegion[] daughters = null;
  try {
    daughters = splitRegion(region, Bytes.toBytes("" + splitPoint));
    for (int i = 0; i < daughters.length; i++) {
      daughters[i] = HRegion.openHRegion(daughters[i], null);
    }
    assertEquals(2, daughters.length);
    // Lower daughter holds the first batch, upper daughter the second.
    verifyData(daughters[0], firstRow, rowsPerHalf, qualifier, families);
    verifyData(daughters[1], splitPoint, rowsPerHalf, qualifier, families);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Recovered-edits files whose sequence ids are all below the stores' maximum
 * must be skipped entirely; replay reports the stores' max sequence id.
 */
@Test
public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  try {
    Path regionDir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    Path editsDir = WALSplitter.getRegionDirRecoveredEditsDir(regionDir);
    // Drop a handful of bogus edits files named by low sequence ids.
    for (int seq = 1000; seq < 1050; seq += 10) {
      Path editsFile = new Path(editsDir, String.format("%019d", seq));
      FSDataOutputStream out = fs.create(editsFile);
      out.writeInt(seq);
      out.close();
    }
    // Plus one empty file just under the minimum sequence id of interest.
    long minSeqId = 2000;
    Path emptyEdits = new Path(editsDir, String.format("%019d", minSeqId - 1));
    FSDataOutputStream out = fs.create(emptyEdits);
    out.close();
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (Store store : region.getStores()) {
      maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId);
    }
    // Every edits file is older than the stores' max, so all are ignored.
    long seqId = region.replayRecoveredEditsIfAny(regionDir, maxSeqIdInStores, null, null);
    assertEquals(minSeqId, seqId);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier 
@Test public void testDelete_CheckTimestampUpdated() throws IOException { TableName tableName=TableName.valueOf(name.getMethodName()); byte[] row1=Bytes.toBytes("row1"); byte[] col1=Bytes.toBytes("col1"); byte[] col2=Bytes.toBytes("col2"); byte[] col3=Bytes.toBytes("col3"); String method=this.getName(); this.region=initHRegion(tableName,method,CONF,fam1); try { List kvs=new ArrayList(); kvs.add(new KeyValue(row1,fam1,col1,null)); kvs.add(new KeyValue(row1,fam1,col2,null)); kvs.add(new KeyValue(row1,fam1,col3,null)); NavigableMap> deleteMap=new TreeMap>(Bytes.BYTES_COMPARATOR); deleteMap.put(fam1,kvs); region.delete(deleteMap,Durability.SYNC_WAL); long now=System.currentTimeMillis(); DefaultMemStore memstore=(DefaultMemStore)((HStore)region.getStore(fam1)).memstore; Cell firstCell=memstore.cellSet.first(); assertTrue(firstCell.getTimestamp() <= now); now=firstCell.getTimestamp(); for ( Cell cell : memstore.cellSet) { assertTrue(cell.getTimestamp() <= now); now=cell.getTimestamp(); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Reversed full scan over memstore-only data in a single column family:
 * rows must come back in descending order (rowC, rowB, rowA), one cell per
 * row, with "more rows" reported until the last row is returned.
 */
@Test(timeout = 60000)
public void testReverseScanner_FromMemStore_SingleCF_FullScan() throws IOException {
  byte[] rowC = Bytes.toBytes("rowC");
  byte[] rowA = Bytes.toBytes("rowA");
  byte[] rowB = Bytes.toBytes("rowB");
  byte[] cf = Bytes.toBytes("CF");
  byte[][] families = { cf };
  byte[] col = Bytes.toBytes("C");
  long ts = 1;
  String method = this.getName();
  this.region = initHRegion(tableName, method, families);
  try {
    // rowC gets two versions; rowA and rowB one each — all memstore only.
    KeyValue kv1 = new KeyValue(rowC, cf, col, ts, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, null);
    KeyValue kv2 = new KeyValue(rowA, cf, col, ts, KeyValue.Type.Put, null);
    KeyValue kv3 = new KeyValue(rowB, cf, col, ts, KeyValue.Type.Put, null);
    Put put = new Put(rowC);
    put.add(kv1);
    put.add(kv11);
    region.put(put);
    put = new Put(rowA);
    put.add(kv2);
    region.put(put);
    put = new Put(rowB);
    put.add(kv3);
    region.put(put);
    Scan scan = new Scan();
    scan.setReversed(true);
    List<Cell> currRow = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);
    // Walk the expected reverse row order; the scanner signals "no more
    // rows" exactly on the final row.
    byte[][] expectedRows = { rowC, rowB, rowA };
    for (int i = 0; i < expectedRows.length; i++) {
      boolean hasNext = scanner.next(currRow);
      assertEquals(1, currRow.size());
      Cell cell = currRow.get(0);
      assertTrue(Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
          expectedRows[i], 0, expectedRows[i].length));
      if (i < expectedRows.length - 1) {
        assertTrue(hasNext);
      } else {
        assertFalse(hasNext);
      }
      currRow.clear();
    }
    scanner.close();
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Opening a region must append exactly one REGION_OPEN RegionEventDescriptor
 * marker to the WAL describing the server, table, region and its stores
 * (fam1 has one store file from the earlier flush; fam2 has none).
 * <p>
 * Fixed: (1) the captor had lost its type argument
 * ({@code ArgumentCaptor editCaptor}), which breaks the {@code WALEdit}
 * assignment from {@code getValue()}; (2) two assertEquals calls on the
 * store home dir had expected/actual reversed.
 * @throws Exception on unexpected test failure
 */
@Test
public void testOpenRegionWrittenToWAL() throws Exception {
  final ServerName serverName = ServerName.valueOf("testOpenRegionWrittenToWAL", 100, 42);
  final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWAL"));
  htd.addFamily(new HColumnDescriptor(fam1));
  htd.addFamily(new HColumnDescriptor(fam2));
  HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY,
      HConstants.EMPTY_BYTE_ARRAY);
  // Create the region with one flushed store file in fam1, then close it.
  HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);
  assertNotNull(region);
  region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
  region.flush(true);
  HBaseTestingUtility.closeRegionAndWAL(region);
  // Reopen against a mocked WAL and capture what gets appended.
  ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
  WAL wal = mockWAL();
  when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
  try {
    region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(),
        rss, null);
    verify(wal, times(1)).append((HTableDescriptor) any(), (HRegionInfo) any(),
        (WALKey) any(), editCaptor.capture(), anyBoolean());
    WALEdit edit = editCaptor.getValue();
    assertNotNull(edit);
    assertNotNull(edit.getCells());
    assertEquals(1, edit.getCells().size());
    RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getCells().get(0));
    assertNotNull(desc);
    LOG.info("RegionEventDescriptor from WAL: " + desc);
    assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType());
    assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName()));
    assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(),
        hri.getEncodedNameAsBytes()));
    assertTrue(desc.getLogSequenceNumber() > 0);
    assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer()));
    assertEquals(2, desc.getStoresCount());
    // fam1: one store file, home dir named after the family, relative path.
    StoreDescriptor store = desc.getStores(0);
    assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1));
    assertEquals(Bytes.toString(fam1), store.getStoreHomeDir());
    assertEquals(1, store.getStoreFileCount());
    assertFalse(store.getStoreFile(0).contains("/"));
    // fam2: present in the descriptor but holds no files.
    store = desc.getStores(1);
    assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2));
    assertEquals(Bytes.toString(fam2), store.getStoreHomeDir());
    assertEquals(0, store.getStoreFileCount());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Version enforcement reading from store files only: after flushing three
 * versions of two qualifiers, a max-versions scan with explicit columns
 * returns the newer versions and drops the oldest (ts1) cells.
 */
@Test
public void testScanner_ExplicitColumns_FromFilesOnly_EnforceVersions() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] qf1 = Bytes.toBytes("qualifier1");
  byte[] qf2 = Bytes.toBytes("qualifier2");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[][] families = { fam1 };
  long ts1 = 1;
  long ts2 = ts1 + 1;
  long ts3 = ts1 + 2;
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, families);
  try {
    // Three versions of each of two qualifiers on a single row.
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);
    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);
    Put put = new Put(row1);
    put.add(kv13);
    put.add(kv12);
    put.add(kv11);
    put.add(kv23);
    put.add(kv22);
    put.add(kv21);
    region.put(put);
    // Push everything out of the memstore before scanning.
    region.flush(true);
    List<Cell> expected = new ArrayList<>();
    expected.add(kv13);
    expected.add(kv12);
    expected.add(kv23);
    expected.add(kv22);
    Scan scan = new Scan(row1);
    scan.addColumn(fam1, qf1);
    scan.addColumn(fam1, qf2);
    scan.setMaxVersions(MAX_VERSIONS);
    List<Cell> actual = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);
    boolean moreRows = scanner.next(actual);
    assertEquals(false, moreRows);
    for (int i = 0; i < expected.size(); i++) {
      assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i)));
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * checkAndRowMutate must never move a cell's timestamp backwards: a mutation
 * applied while the injected clock reads an earlier time than the existing
 * cell keeps the existing (larger) timestamp while the value is updated.
 * <p>
 * Fixed: the injected ManualEnvironmentEdge was never reset and the region
 * never closed, leaking clock state and WAL resources into later tests;
 * also normalized assertEquals(expected, actual) argument order.
 * @throws IOException on region access failure
 */
@Test
public void testCheckAndRowMutateTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  try {
    edge.setValue(10);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.addColumn(fam1, qual1, qual1);
    region.put(p);
    Result result = region.get(new Get(row));
    Cell c = result.getColumnLatestCell(fam1, qual1);
    assertNotNull(c);
    assertEquals(10L, c.getTimestamp());
    // Turn the clock back; the guarded mutation must not produce a
    // timestamp smaller than the one already stored.
    edge.setValue(1);
    p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.addColumn(fam1, qual1, qual2);
    RowMutations rm = new RowMutations(row);
    rm.add(p);
    assertTrue(region.checkAndRowMutate(row, fam1, qual1, CompareOp.EQUAL,
        new BinaryComparator(qual1), rm, false));
    result = region.get(new Get(row));
    c = result.getColumnLatestCell(fam1, qual1);
    // Timestamp is unchanged, but the value was updated to qual2.
    assertEquals(10L, c.getTimestamp());
    LOG.info("c value "
        + Bytes.toStringBinary(c.getValueArray(), c.getValueOffset(), c.getValueLength()));
    assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(),
        qual2, 0, qual2.length));
  } finally {
    // Restore the real clock and release the region.
    EnvironmentEdgeManager.reset();
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * checkAndMutate with a comparator matching the stored value must apply
 * both a Put and a Delete, returning true each time.
 */
@Test
public void testCheckAndMutate_WithCorrectValue() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] qf1 = Bytes.toBytes("qualifier");
  byte[] val1 = Bytes.toBytes("value1");
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, fam1);
  try {
    // Seed the cell the checks will compare against.
    Put seed = new Put(row1);
    seed.addColumn(fam1, qf1, val1);
    region.put(seed);
    // A matching check lets the Put through...
    boolean applied = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
        new BinaryComparator(val1), seed, true);
    assertEquals(true, applied);
    // ...and lets a Delete through as well.
    Delete delete = new Delete(row1);
    delete.addColumn(fam1, qf1);
    applied = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
        new BinaryComparator(val1), delete, true);
    assertEquals(true, applied);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * When the check on fam1 passes, the guarded Put against fam2 must actually
 * be written and must be readable afterwards.
 */
@Test
public void testCheckAndPut_ThatPutWasWritten() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] fam2 = Bytes.toBytes("fam2");
  byte[] qf1 = Bytes.toBytes("qualifier");
  byte[] val1 = Bytes.toBytes("value1");
  byte[] val2 = Bytes.toBytes("value2");
  byte[][] families = { fam1, fam2 };
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, families);
  try {
    // Seed fam1 so the check has something to match.
    Put seed = new Put(row1);
    seed.addColumn(fam1, qf1, val1);
    region.put(seed);
    // Conditionally write one cell to fam2, guarded by the fam1 value.
    long ts = System.currentTimeMillis();
    KeyValue kv = new KeyValue(row1, fam2, qf1, ts, KeyValue.Type.Put, val2);
    Put guarded = new Put(row1);
    guarded.add(kv);
    boolean applied = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
        new BinaryComparator(val1), guarded, true);
    assertEquals(true, applied);
    // Read fam2 back and confirm exactly the guarded cell landed.
    Get get = new Get(row1);
    get.addColumn(fam2, qf1);
    Cell[] actual = region.get(get).rawCells();
    Cell[] expected = { kv };
    assertEquals(expected.length, actual.length);
    for (int i = 0; i < actual.length; i++) {
      assertEquals(expected[i], actual[i]);
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * checkAndMutate driving Deletes: column deletes guarded by a value check,
 * a family delete guarded by an empty-value check, and finally a whole-row
 * delete, verifying the surviving cells after each step.
 */
@Test
public void testCheckAndDelete_ThatDeleteWasWritten() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] fam2 = Bytes.toBytes("fam2");
  byte[] qf1 = Bytes.toBytes("qualifier1");
  byte[] qf2 = Bytes.toBytes("qualifier2");
  byte[] qf3 = Bytes.toBytes("qualifier3");
  byte[] val1 = Bytes.toBytes("value1");
  byte[] val2 = Bytes.toBytes("value2");
  byte[] val3 = Bytes.toBytes("value3");
  byte[] emptyVal = new byte[] {};
  byte[][] families = { fam1, fam2 };
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, families);
  try {
    // First version of fam1:qf1.
    Put put = new Put(row1);
    put.addColumn(fam1, qf1, val1);
    region.put(put);
    // Sleep so the second version gets a strictly later timestamp.
    Threads.sleep(2);
    put = new Put(row1);
    put.addColumn(fam1, qf1, val2);
    put.addColumn(fam2, qf1, val3);
    put.addColumn(fam2, qf2, val2);
    put.addColumn(fam2, qf3, val1);
    put.addColumn(fam1, qf3, val1);
    region.put(put);
    // Delete specific columns, guarded by the latest fam1:qf1 value (val2).
    Delete delete = new Delete(row1);
    delete.addColumn(fam1, qf1);
    delete.addColumn(fam2, qf1);
    delete.addColumn(fam1, qf3);
    boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
        new BinaryComparator(val2), delete, true);
    assertEquals(true, res);
    // addColumn without a timestamp removes only the latest version, so
    // fam1:qf1 reverts to val1; fam1:qf3 and fam2:qf1 are gone.
    Get get = new Get(row1);
    get.addColumn(fam1, qf1);
    get.addColumn(fam1, qf3);
    get.addColumn(fam2, qf2);
    Result r = region.get(get);
    assertEquals(2, r.size());
    assertArrayEquals(val1, r.getValue(fam1, qf1));
    assertArrayEquals(val2, r.getValue(fam2, qf2));
    // Drop all of fam2, guarded by fam2:qf1 comparing equal to empty
    // (that column was deleted above, so the empty comparator matches).
    delete = new Delete(row1);
    delete.addFamily(fam2);
    res = region.checkAndMutate(row1, fam2, qf1, CompareOp.EQUAL,
        new BinaryComparator(emptyVal), delete, true);
    assertEquals(true, res);
    get = new Get(row1);
    r = region.get(get);
    assertEquals(1, r.size());
    assertArrayEquals(val1, r.getValue(fam1, qf1));
    // Finally delete the whole row; nothing must remain.
    delete = new Delete(row1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
        new BinaryComparator(val1), delete, true);
    assertEquals(true, res);
    get = new Get(row1);
    r = region.get(get);
    assertEquals(0, r.size());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Basic Get: requesting a subset of columns returns exactly those cells,
 * and a ColumnCountGetFilter caps the number of returned cells.
 */
@Test
public void testGet_Basic() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] col1 = Bytes.toBytes("col1");
  byte[] col2 = Bytes.toBytes("col2");
  byte[] col3 = Bytes.toBytes("col3");
  byte[] col4 = Bytes.toBytes("col4");
  byte[] col5 = Bytes.toBytes("col5");
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, fam1);
  try {
    // Five columns on one row, all with null values.
    Put put = new Put(row1);
    put.addColumn(fam1, col1, null);
    put.addColumn(fam1, col2, null);
    put.addColumn(fam1, col3, null);
    put.addColumn(fam1, col4, null);
    put.addColumn(fam1, col5, null);
    region.put(put);
    // Ask for just col2 and col4; exactly those two cells must come back.
    Get get = new Get(row1);
    get.addColumn(fam1, col2);
    get.addColumn(fam1, col4);
    KeyValue[] expected = { new KeyValue(row1, fam1, col2), new KeyValue(row1, fam1, col4) };
    Result res = region.get(get);
    assertEquals(expected.length, res.size());
    for (int i = 0; i < res.size(); i++) {
      Cell returned = res.rawCells()[i];
      assertTrue(CellUtil.matchingRow(expected[i], returned));
      assertTrue(CellUtil.matchingFamily(expected[i], returned));
      assertTrue(CellUtil.matchingQualifier(expected[i], returned));
    }
    // A count filter limits the result to the first `count` cells.
    final int count = 2;
    Get filtered = new Get(row1);
    filtered.setFilter(new ColumnCountGetFilter(count));
    res = region.get(filtered);
    assertEquals(count, res.size());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Added for HBASE-5416: joined (lazy) scanners. When only a subset of column
 * families participates in the filter, rows failing the essential-family
 * check skip loading the joined families, and matching rows return their
 * families in order.
 * <p>
 * Fixed: (1) all three size assertEquals calls had expected/actual reversed,
 * producing misleading failure messages; (2) the result list had lost its
 * element type ({@code List results}), breaking the Cell-typed get() calls.
 */
@Test
public void testScanner_JoinedScanners() throws IOException {
  byte[] cf_essential = Bytes.toBytes("essential");
  byte[] cf_joined = Bytes.toBytes("joined");
  byte[] cf_alpha = Bytes.toBytes("alpha");
  this.region = initHRegion(tableName, getName(), CONF, cf_essential, cf_joined, cf_alpha);
  try {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] row2 = Bytes.toBytes("row2");
    byte[] row3 = Bytes.toBytes("row3");
    byte[] col_normal = Bytes.toBytes("d");
    byte[] col_alpha = Bytes.toBytes("a");
    byte[] filtered_val = Bytes.toBytes(3);
    // row1: essential value passes the filter; one joined cell.
    Put put = new Put(row1);
    put.addColumn(cf_essential, col_normal, Bytes.toBytes(1));
    put.addColumn(cf_joined, col_alpha, Bytes.toBytes(1));
    region.put(put);
    // row2: passes too, with cells in all three families.
    put = new Put(row2);
    put.addColumn(cf_essential, col_alpha, Bytes.toBytes(2));
    put.addColumn(cf_joined, col_normal, Bytes.toBytes(2));
    put.addColumn(cf_alpha, col_alpha, Bytes.toBytes(2));
    region.put(put);
    // row3: carries the filtered value, so it must be excluded.
    put = new Put(row3);
    put.addColumn(cf_essential, col_normal, filtered_val);
    put.addColumn(cf_joined, col_normal, filtered_val);
    region.put(put);
    Scan scan = new Scan();
    Filter filter = new SingleColumnValueExcludeFilter(cf_essential, col_normal,
        CompareOp.NOT_EQUAL, filtered_val);
    scan.setFilter(filter);
    scan.setLoadColumnFamiliesOnDemand(true);
    InternalScanner s = region.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    // row1: the Exclude filter strips the tested essential cell, leaving 1.
    assertTrue(s.next(results));
    assertEquals(1, results.size());
    results.clear();
    // row2: all three families come back, ordered by family name.
    assertTrue(s.next(results));
    assertEquals(3, results.size());
    assertTrue("orderCheck", CellUtil.matchingFamily(results.get(0), cf_alpha));
    assertTrue("orderCheck", CellUtil.matchingFamily(results.get(1), cf_essential));
    assertTrue("orderCheck", CellUtil.matchingFamily(results.get(2), cf_joined));
    results.clear();
    // row3 was filtered out, so the scan is exhausted.
    assertFalse(s.next(results));
    assertEquals(0, results.size());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Reversed scan with a tiny block size so each cell lands in its own block:
 * scanning [rowD..rowA) over col1 must return rowD, rowC, rowB in that
 * order, and over col2 must return only rowD.
 * <p>
 * Fixed: the row buffer had lost its element type ({@code List currRow}),
 * which breaks compilation of the Cell accessor calls; restored
 * {@code List<Cell>}.
 * @throws IOException on region access failure
 */
@Test(timeout = 60000)
public void testReverseScanner_smaller_blocksize() throws IOException {
  byte[] rowA = Bytes.toBytes("rowA");
  byte[] rowB = Bytes.toBytes("rowB");
  byte[] rowC = Bytes.toBytes("rowC");
  byte[] rowD = Bytes.toBytes("rowD");
  byte[] rowE = Bytes.toBytes("rowE");
  byte[] cf = Bytes.toBytes("CF");
  byte[][] families = { cf };
  byte[] col1 = Bytes.toBytes("col1");
  byte[] col2 = Bytes.toBytes("col2");
  long ts = 1;
  String method = this.getName();
  // Force a 1-byte block size so every cell starts a new block.
  HBaseConfiguration config = new HBaseConfiguration();
  config.setInt("test.block.size", 1);
  this.region = initHRegion(tableName, method, config, families);
  try {
    KeyValue kv1 = new KeyValue(rowA, cf, col1, ts, KeyValue.Type.Put, null);
    KeyValue kv2 = new KeyValue(rowB, cf, col1, ts, KeyValue.Type.Put, null);
    KeyValue kv3 = new KeyValue(rowC, cf, col1, ts, KeyValue.Type.Put, null);
    KeyValue kv4_1 = new KeyValue(rowD, cf, col1, ts, KeyValue.Type.Put, null);
    KeyValue kv4_2 = new KeyValue(rowD, cf, col2, ts, KeyValue.Type.Put, null);
    KeyValue kv5 = new KeyValue(rowE, cf, col1, ts, KeyValue.Type.Put, null);
    Put put = null;
    put = new Put(rowA);
    put.add(kv1);
    region.put(put);
    put = new Put(rowB);
    put.add(kv2);
    region.put(put);
    put = new Put(rowC);
    put.add(kv3);
    region.put(put);
    put = new Put(rowD);
    put.add(kv4_1);
    region.put(put);
    put = new Put(rowD);
    put.add(kv4_2);
    region.put(put);
    put = new Put(rowE);
    put.add(kv5);
    region.put(put);
    region.flush(true);
    // Reversed scan from rowD (inclusive) down toward rowA on col1.
    Scan scan = new Scan(rowD, rowA);
    scan.addColumn(families[0], col1);
    scan.setReversed(true);
    List<Cell> currRow = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);
    boolean hasNext = scanner.next(currRow);
    assertEquals(1, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
        currRow.get(0).getRowLength(), rowD, 0, rowD.length));
    assertTrue(hasNext);
    currRow.clear();
    hasNext = scanner.next(currRow);
    assertEquals(1, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
        currRow.get(0).getRowLength(), rowC, 0, rowC.length));
    assertTrue(hasNext);
    currRow.clear();
    hasNext = scanner.next(currRow);
    assertEquals(1, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
        currRow.get(0).getRowLength(), rowB, 0, rowB.length));
    assertFalse(hasNext);
    scanner.close();
    // Same range on col2: only rowD has that column.
    scan = new Scan(rowD, rowA);
    scan.addColumn(families[0], col2);
    scan.setReversed(true);
    currRow.clear();
    scanner = region.getScanner(scan);
    hasNext = scanner.next(currRow);
    assertEquals(1, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
        currRow.get(0).getRowLength(), rowD, 0, rowD.length));
    scanner.close();
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Append must keep timestamps monotonic: appending while the injected clock
 * reads an earlier time than the existing cell keeps the larger timestamp,
 * while the value is still appended (qual1 twice).
 * <p>
 * Fixed: the injected ManualEnvironmentEdge was never reset and the region
 * never closed, leaking clock state and WAL resources into later tests;
 * also normalized assertEquals(expected, actual) argument order.
 * @throws IOException on region access failure
 */
@Test
public void testAppendTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  try {
    edge.setValue(10);
    Append a = new Append(row);
    a.setDurability(Durability.SKIP_WAL);
    a.add(fam1, qual1, qual1);
    region.append(a);
    Result result = region.get(new Get(row));
    Cell c = result.getColumnLatestCell(fam1, qual1);
    assertNotNull(c);
    assertEquals(10L, c.getTimestamp());
    // Turn the clock back and append again: the timestamp must not regress.
    edge.setValue(1);
    region.append(a);
    result = region.get(new Get(row));
    c = result.getColumnLatestCell(fam1, qual1);
    assertEquals(10L, c.getTimestamp());
    // The value, however, is qual1 appended to itself.
    byte[] expected = new byte[qual1.length * 2];
    System.arraycopy(qual1, 0, expected, 0, qual1.length);
    System.arraycopy(qual1, 0, expected, qual1.length, qual1.length);
    assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(),
        expected, 0, expected.length));
  } finally {
    // Restore the real clock and release the region.
    EnvironmentEdgeManager.reset();
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * Splits twice and verifies getting from each of the split regions.
 * First splits the parent at its computed split row and checks both
 * daughters, then loads more data, splits each daughter at its midkey, and
 * spot-checks one row in each of the resulting four regions.
 * <p>
 * Fixed: the sorted-region map had lost its type arguments
 * ({@code TreeMap sortedMap}), which breaks compilation of the enhanced for
 * loop over {@code HRegion}; restored {@code TreeMap<String, HRegion>}.
 * @throws Exception on unexpected test failure
 */
@Test
public void testBasicSplit() throws Exception {
  byte[][] families = { fam1, fam2, fam3 };
  Configuration hc = initSplit();
  String method = this.getName();
  this.region = initHRegion(tableName, method, hc, families);
  try {
    // Fill fam3, flush and compact so a split point can be computed.
    LOG.info("" + HBaseTestCase.addContent(region, fam3));
    region.flush(true);
    region.compactStores();
    byte[] splitRow = region.checkSplit();
    assertNotNull(splitRow);
    LOG.info("SplitRow: " + Bytes.toString(splitRow));
    HRegion[] regions = splitRegion(region, splitRow);
    try {
      for (int i = 0; i < regions.length; i++) {
        regions[i] = HRegion.openHRegion(regions[i], null);
      }
      // Each daughter serves the keys on its side of the split row.
      assertGet(regions[0], fam3, Bytes.toBytes(START_KEY));
      assertGet(regions[1], fam3, splitRow);
      assertScan(regions[0], fam3, Bytes.toBytes(START_KEY));
      assertScan(regions[1], fam3, splitRow);
      // Load more data into every family of both daughters and flush.
      for (int i = 0; i < regions.length; i++) {
        for (int j = 0; j < 2; j++) {
          HBaseTestCase.addContent(regions[i], fam3);
        }
        HBaseTestCase.addContent(regions[i], fam2);
        HBaseTestCase.addContent(regions[i], fam1);
        regions[i].flush(true);
      }
      byte[][] midkeys = new byte[regions.length][];
      for (int i = 0; i < regions.length; i++) {
        regions[i].compactStores();
        midkeys[i] = regions[i].checkSplit();
      }
      // Split each daughter again, collecting grandchildren sorted by name.
      TreeMap<String, HRegion> sortedMap = new TreeMap<String, HRegion>();
      for (int i = 0; i < regions.length; i++) {
        HRegion[] rs = null;
        if (midkeys[i] != null) {
          rs = splitRegion(regions[i], midkeys[i]);
          for (int j = 0; j < rs.length; j++) {
            sortedMap.put(Bytes.toString(rs[j].getRegionInfo().getRegionName()),
                HRegion.openHRegion(rs[j], null));
          }
        }
      }
      LOG.info("Made 4 regions");
      // Probe one row per region, stepping evenly through the key space.
      int interval = (LAST_CHAR - FIRST_CHAR) / 3;
      byte[] b = Bytes.toBytes(START_KEY);
      for (HRegion r : sortedMap.values()) {
        assertGet(r, fam3, b);
        b[0] += interval;
      }
    } finally {
      for (int i = 0; i < regions.length; i++) {
        try {
          // Best-effort close of the daughters; failures are non-fatal here.
          regions[i].close();
        } catch (IOException e) {
        }
      }
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Scans explicit columns whose versions are spread across three store files
 * plus the memstore, and verifies that max-versions enforcement returns the
 * newest three versions of each qualifier, newest first.
 * @throws IOException on put/flush/scan failure
 */
@Test
public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[][] families = { fam1 };
  byte[] qf1 = Bytes.toBytes("qualifier1");
  byte[] qf2 = Bytes.toBytes("qualifier2");
  long ts1 = 1;
  long ts2 = ts1 + 1;
  long ts3 = ts1 + 2;
  long ts4 = ts1 + 3;
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, families);
  try {
    // Four versions of each of two qualifiers on the same row.
    KeyValue kv14 = new KeyValue(row1, fam1, qf1, ts4, KeyValue.Type.Put, null);
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);
    KeyValue kv24 = new KeyValue(row1, fam1, qf2, ts4, KeyValue.Type.Put, null);
    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);
    // Flush after each pair so versions land in separate store files; the
    // oldest pair stays in the memstore.
    Put put = new Put(row1);
    put.add(kv14);
    put.add(kv24);
    region.put(put);
    region.flush(true);
    put = new Put(row1);
    put.add(kv23);
    put.add(kv13);
    region.put(put);
    region.flush(true);
    put = new Put(row1);
    put.add(kv22);
    put.add(kv12);
    region.put(put);
    region.flush(true);
    put = new Put(row1);
    put.add(kv21);
    put.add(kv11);
    region.put(put);
    // Expected: the three newest versions of each qualifier, newest first.
    List<Cell> expected = new ArrayList<Cell>();
    expected.add(kv14);
    expected.add(kv13);
    expected.add(kv12);
    expected.add(kv24);
    expected.add(kv23);
    expected.add(kv22);
    Scan scan = new Scan(row1);
    scan.addColumn(fam1, qf1);
    scan.addColumn(fam1, qf2);
    int versions = 3;
    scan.setMaxVersions(versions);
    List<Cell> actual = new ArrayList<Cell>();
    InternalScanner scanner = region.getScanner(scan);
    boolean hasNext = scanner.next(actual);
    assertEquals(false, hasNext);
    // Verify result in order; mvcc/sequence ids may differ after flushes.
    for (int i = 0; i < expected.size(); i++) {
      assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i)));
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes very wide records and gets the latest row every time. Flushes and
 * compacts the region aggressively to catch issues.
 * @throws IOException by flush / scan / compaction
 * @throws InterruptedException when joining threads
 */
@Test
public void testWritesWhileGetting() throws Exception {
  int testCount = 50;
  int numRows = 1;
  int numFamilies = 10;
  int numQualifiers = 100;
  int compactInterval = 100;
  byte[][] families = new byte[numFamilies][];
  for (int i = 0; i < numFamilies; i++) {
    families[i] = Bytes.toBytes("family" + i);
  }
  byte[][] qualifiers = new byte[numQualifiers][];
  for (int i = 0; i < numQualifiers; i++) {
    qualifiers[i] = Bytes.toBytes("qual" + i);
  }
  String method = "testWritesWhileGetting";
  // Aggressive compaction settings so flush/compact churn happens during reads.
  Configuration conf = HBaseConfiguration.create(CONF);
  conf.setInt("hbase.hstore.compaction.min", 1);
  conf.setInt("hbase.hstore.compaction.max", 1000);
  this.region = initHRegion(tableName, method, conf, families);
  PutThread putThread = null;
  MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
  try {
    putThread = new PutThread(numRows, families, qualifiers);
    putThread.start();
    putThread.waitForFirstPut();
    // Background thread: flush constantly, compact after every 20 flushes that
    // reported a compaction was needed.
    ctx.addThread(new RepeatingTestThread(ctx) {
      private int flushesSinceCompact = 0;
      private final int maxFlushesSinceCompact = 20;
      @Override
      public void doAnAction() throws Exception {
        if (region.flush(true).isCompactionNeeded()) {
          ++flushesSinceCompact;
        }
        if (flushesSinceCompact == maxFlushesSinceCompact) {
          region.compact(false);
          flushesSinceCompact = 0;
        }
      }
    });
    ctx.startThreads();
    Get get = new Get(Bytes.toBytes("row0"));
    Result result = null;
    int expectedCount = numFamilies * numQualifiers;
    long prevTimestamp = 0L;
    for (int i = 0; i < testCount; i++) {
      LOG.info("testWritesWhileGetting verify turn " + i);
      boolean previousEmpty = result == null || result.isEmpty();
      result = region.get(get);
      if (!result.isEmpty() || !previousEmpty || i > compactInterval) {
        // Every read must see a complete row (all families x qualifiers):
        // a row's columns are written atomically, so partial rows are a bug.
        assertEquals("i=" + i, expectedCount, result.size());
        // Timestamps across iterations must be monotonically non-decreasing.
        long timestamp = 0;
        for (Cell kv : result.rawCells()) {
          if (CellUtil.matchingFamily(kv, families[0])
              && CellUtil.matchingQualifier(kv, qualifiers[0])) {
            timestamp = kv.getTimestamp();
          }
        }
        assertTrue(timestamp >= prevTimestamp);
        prevTimestamp = timestamp;
        // All cells in one read must carry the same value (one write generation).
        Cell previousKV = null;
        for (Cell kv : result.rawCells()) {
          byte[] thisValue = CellUtil.cloneValue(kv);
          if (previousKV != null) {
            if (Bytes.compareTo(CellUtil.cloneValue(previousKV), thisValue) != 0) {
              LOG.warn("These two KV should have the same value." + " Previous KV:" + previousKV
                  + "(memStoreTS:" + previousKV.getSequenceId() + ")" + ", New KV: " + kv
                  + "(memStoreTS:" + kv.getSequenceId() + ")");
              assertEquals(0, Bytes.compareTo(CellUtil.cloneValue(previousKV), thisValue));
            }
          }
          previousKV = kv;
        }
      }
    }
  } finally {
    // Stop the writer first so the final flush sees a quiesced region.
    if (putThread != null) putThread.done();
    region.flush(true);
    if (putThread != null) {
      putThread.join();
      putThread.checkNoError();
    }
    ctx.stop();
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test case to check increment function with memstore flushing: 20 incrementer
 * threads each apply 100 increments while a flusher thread flushes the region
 * continuously; the final counter must equal threadNum * incCounter.
 * @throws Exception if a thread is interrupted or region access fails
 */
@Test
public void testParallelIncrementWithMemStoreFlush() throws Exception {
  byte[] family = Incrementer.family;
  this.region = initHRegion(tableName, method, CONF, family);
  final HRegion region = this.region;
  final AtomicBoolean incrementDone = new AtomicBoolean(false);
  // Keep flushing until every incrementer has finished.
  Runnable flusher = new Runnable() {
    @Override
    public void run() {
      while (!incrementDone.get()) {
        try {
          region.flush(true);
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    }
  };
  int threadNum = 20;
  int incCounter = 100;
  long expected = threadNum * incCounter;
  Thread[] incrementers = new Thread[threadNum];
  Thread flushThread = new Thread(flusher);
  for (int i = 0; i < threadNum; i++) {
    incrementers[i] = new Thread(new Incrementer(this.region, incCounter));
    incrementers[i].start();
  }
  flushThread.start();
  for (int i = 0; i < threadNum; i++) {
    incrementers[i].join();
  }
  incrementDone.set(true);
  flushThread.join();
  Get get = new Get(Incrementer.incRow);
  get.addColumn(Incrementer.family, Incrementer.qualifier);
  get.setMaxVersions(1);
  Result res = this.region.get(get);
  // Parameterized list (was raw) and JUnit argument order: expected first.
  List<Cell> kvs = res.getColumnCells(Incrementer.family, Incrementer.qualifier);
  assertEquals(1, kvs.size());
  Cell kv = kvs.get(0);
  assertEquals(expected, Bytes.toLong(kv.getValueArray(), kv.getValueOffset()));
  // Close the region and its WAL like the sibling tests do instead of leaking it.
  HBaseTestingUtility.closeRegionAndWAL(this.region);
  this.region = null;
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Batch-puts ten rows with no row locks held and checks the per-operation
 * status codes and the WAL sync counter; a second batch containing one put
 * against a nonexistent family must mark only that operation BAD_FAMILY.
 */
@Test
public void testBatchPut_whileNoRowLocksHeld() throws IOException {
  byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
  byte[] qual = Bytes.toBytes("qual");
  byte[] val = Bytes.toBytes("val");
  this.region = initHRegion(TableName.valueOf(getName()), getName(), CONF, cf);
  MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
  try {
    // Baseline WAL sync count before any mutation.
    long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source);
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
    LOG.info("First a batch put with all valid puts");
    final Put[] puts = new Put[10];
    for (int i = 0; i < 10; i++) {
      puts[i] = new Put(Bytes.toBytes("row_" + i));
      puts[i].addColumn(cf, qual, val);
    }
    OperationStatus[] codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals(OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode());
    }
    // The whole batch should be covered by exactly one WAL sync.
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source);
    LOG.info("Next a batch put with one invalid family");
    puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, val);
    codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    // Only the put against the bad family fails; the rest still succeed.
    for (int i = 0; i < 10; i++) {
      assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY : OperationStatusCode.SUCCESS,
          codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 2, source);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Checks that a region writes its .regioninfo file when it is created, and
 * that a region open regenerates the file if it has gone missing.
 */
@Test
public void testRegionInfoFileCreation() throws IOException {
  Path rootDir = new Path(dir + "testRegionInfoFileCreation");
  HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("testtb"));
  tableDesc.addFamily(new HColumnDescriptor("cf"));
  HRegionInfo regionInfo = new HRegionInfo(tableDesc.getTableName());
  // Creating the region must lay down the .regioninfo file.
  HRegion r = HBaseTestingUtility.createRegionAndWAL(regionInfo, rootDir, CONF, tableDesc, false);
  Path regionDir = r.getRegionFileSystem().getRegionDir();
  FileSystem fs = r.getRegionFileSystem().getFileSystem();
  HBaseTestingUtility.closeRegionAndWAL(r);
  Path infoFile = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);
  assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir",
      fs.exists(infoFile));
  // A plain reopen keeps the file where it was.
  r = HRegion.openHRegion(rootDir, regionInfo, tableDesc, null, CONF);
  assertEquals(regionDir, r.getRegionFileSystem().getRegionDir());
  HBaseTestingUtility.closeRegionAndWAL(r);
  assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir",
      fs.exists(infoFile));
  // Remove the file and confirm it is gone.
  fs.delete(infoFile, true);
  assertFalse(HRegionFileSystem.REGION_INFO_FILE + " should be removed from the region dir",
      fs.exists(infoFile));
  // Opening the region again must recreate it.
  r = HRegion.openHRegion(rootDir, regionInfo, tableDesc, null, CONF);
  assertEquals(regionDir, r.getRegionFileSystem().getRegionDir());
  HBaseTestingUtility.closeRegionAndWAL(r);
  assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir",
      fs.exists(new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE)));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies the max flushed sequence id is usable after the region is closed.
 * @throws IOException on region operations
 */
@Test(timeout = 100000)
public void testSequenceId() throws IOException {
  HRegion r = initHRegion(tableName, name.getMethodName(), CONF, COLUMN_FAMILY_BYTES);
  // Nothing flushed yet: no max flushed seq id, store seq id is zero.
  assertEquals(HConstants.NO_SEQNUM, r.getMaxFlushedSeqId());
  assertEquals(0, (long) r.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES));
  r.close();
  // Closing an empty region changes nothing.
  assertEquals(HConstants.NO_SEQNUM, r.getMaxFlushedSeqId());
  assertEquals(0, (long) r.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES));
  // Reopen, write one cell, then flush so a real sequence id gets assigned.
  r = initHRegion(tableName, name.getMethodName(), CONF, COLUMN_FAMILY_BYTES);
  byte[] data = Bytes.toBytes(name.getMethodName());
  Put p = new Put(data);
  p.addColumn(COLUMN_FAMILY_BYTES, null, data);
  r.put(p);
  // The put alone does not move the flushed seq id.
  assertEquals(HConstants.NO_SEQNUM, r.getMaxFlushedSeqId());
  assertEquals(0, (long) r.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES));
  r.flush(true);
  long maxFlushed = r.getMaxFlushedSeqId();
  r.close();
  // The flushed seq id must survive the close.
  assertEquals(maxFlushed, r.getMaxFlushedSeqId());
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test we do not lose data if we fail a flush and then close.
 * Part of HBase-10466. Tests the following from the issue description:
 * "Bug 1: Wrong calculation of HRegion.memstoreSize: When a flush fails, data to be flushed is
 * kept in each MemStore's snapshot and wait for next flush attempt to continue on it. But when
 * the next flush succeeds, the counter of total memstore size in HRegion is always deduced by
 * the sum of current memstore sizes instead of snapshots left from previous failed flush. This
 * calculation is problematic that almost every time there is failed flush, HRegion.memstoreSize
 * gets reduced by a wrong value. If region flush could not proceed for a couple cycles, the size
 * in current memstore could be much larger than the snapshot. It's likely to drift memstoreSize
 * much smaller than expected. In extreme case, if the error accumulates to even bigger than
 * HRegion's memstore size limit, any further flush is skipped because flush does not do anything
 * if memstoreSize is not larger than 0."
 * @throws Exception
 */
@Test(timeout = 60000)
public void testFlushSizeAccounting() throws Exception {
  final Configuration conf = HBaseConfiguration.create(CONF);
  final String callingMethod = name.getMethodName();
  final WAL wal = createWALCompatibleWithFaultyFileSystem(callingMethod, conf, tableName);
  // Only retry the flush once so the failure surfaces quickly.
  conf.setInt("hbase.hstore.flush.retries.number", 1);
  final User user =
      User.createUserForTesting(conf, this.name.getMethodName(), new String[] { "foo" });
  // Swap in the faulty local filesystem so flush writes fail on demand.
  conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
  user.runAs(new PrivilegedExceptionAction() {
    @Override
    public Object run() throws Exception {
      // Confirm the faulty filesystem actually took effect (FS caching can defeat it).
      FileSystem fs = FileSystem.get(conf);
      Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
      FaultyFileSystem ffs = (FaultyFileSystem) fs;
      HRegion region = null;
      try {
        region = initHRegion(tableName, null, null, callingMethod, conf, false,
            Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES);
        long size = region.getMemstoreSize();
        Assert.assertEquals(0, size);
        // Measure the memstore footprint of a single one-cell put.
        Put p1 = new Put(row);
        p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null));
        region.put(p1);
        final long sizeOfOnePut = region.getMemstoreSize();
        // Force a flush failure: the cell stays behind in the memstore snapshot.
        try {
          LOG.info("Flushing");
          region.flush(true);
          Assert.fail("Didn't bubble up IOE!");
        } catch (DroppedSnapshotException dse) {
          // Expected; the failed flush marks the region closing, so undo that
          // for the rest of the test to proceed.
          region.closing.set(false);
        }
        // Let subsequent filesystem writes succeed.
        ffs.fault.set(false);
        // Accounting must still show the snapshotted cell.
        Assert.assertEquals(sizeOfOnePut, region.getMemstoreSize());
        // Add two more cells to the (new) active memstore.
        Put p2 = new Put(row);
        p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[]) null));
        p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[]) null));
        region.put(p2);
        Assert.assertEquals(sizeOfOnePut * 3, region.getMemstoreSize());
        // This flush succeeds; per HBASE-10466 only the old snapshot's size may
        // be deducted here — presumably p2's two cells remain accounted (TODO
        // confirm which portion this flush drains).
        region.flush(true);
        Assert.assertEquals(sizeOfOnePut * 2, region.getMemstoreSize());
      } finally {
        HBaseTestingUtility.closeRegionAndWAL(region);
      }
      return null;
    }
  });
  FileSystem.closeAllForUGI(user.getUGI());
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Test that we get the expected flush results back.
 * @throws IOException
 */
@Test
public void testFlushResult() throws IOException {
  String method = name.getMethodName();
  TableName tableName = TableName.valueOf(method);
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, family);
  // Flushing an empty memstore cannot succeed and never requests a compaction.
  HRegion.FlushResult fr = region.flush(true);
  assertFalse(fr.isFlushSucceeded());
  assertFalse(fr.isCompactionNeeded());
  // Four put+flush rounds: once enough store files accumulate, the flush
  // result starts asking for a compaction (from the third file onwards).
  for (int round = 0; round < 4; round++) {
    Put put = new Put(tableName.toBytes()).addColumn(family, family, tableName.toBytes());
    region.put(put);
    fr = region.flush(true);
    assertTrue(fr.isFlushSucceeded());
    if (round < 2) {
      assertFalse(fr.isCompactionNeeded());
    } else {
      assertTrue(fr.isCompactionNeeded());
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test for Bug 2 of HBASE-10466.
 * "Bug 2: Conditions for the first flush of region close (so-called pre-flush) If memstoreSize
 * is smaller than a certain value, or when region close starts a flush is ongoing, the first
 * flush is skipped and only the second flush takes place. However, two flushes are required in
 * case previous flush fails and leaves some data in snapshot. The bug could cause loss of data
 * in current memstore. The fix is removing all conditions except abort check so we ensure 2
 * flushes for region close."
 * @throws IOException
 */
@Test(timeout = 60000)
public void testCloseCarryingSnapshot() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, COLUMN_FAMILY_BYTES);
  Store store = region.getStore(COLUMN_FAMILY_BYTES);
  byte[] value = Bytes.toBytes(name.getMethodName());
  // First cell goes into the memstore.
  Put put = new Put(value);
  put.addColumn(COLUMN_FAMILY_BYTES, null, value);
  region.put(put);
  // Manually run the flush "prepare" step so that first cell moves into the
  // store's snapshot (as if a flush had started but not completed).
  StoreFlushContext storeFlushCtx = store.createFlushContext(12345);
  storeFlushCtx.prepare();
  // Second cell lands in the fresh active memstore, not the snapshot.
  put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
  region.put(put);
  // Close must flush BOTH the carried snapshot and the active memstore, leaving
  // the accounted memstore size at exactly zero.
  region.close();
  assertEquals(0, region.getMemstoreSize());
  HBaseTestingUtility.closeRegionAndWAL(region);
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Closing a region must append a REGION_CLOSE event marker to the WAL carrying
 * the table name, encoded region name, server, and one descriptor per store.
 * @throws Exception on region open/close failure
 */
@Test
public void testCloseRegionWrittenToWAL() throws Exception {
  final ServerName serverName = ServerName.valueOf("testCloseRegionWrittenToWAL", 100, 42);
  final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWAL"));
  htd.addFamily(new HColumnDescriptor(fam1));
  htd.addFamily(new HColumnDescriptor(fam2));
  final HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY,
      HConstants.EMPTY_BYTE_ARRAY);
  // Capture every WALEdit appended during open and close. Parameterized captor
  // (was raw) so get(1) yields WALEdit without an unchecked conversion.
  ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
  WAL wal = mockWAL();
  when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
  region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null);
  region.close(false);
  // Two appends total: the open marker and the close marker.
  verify(wal, times(2)).append((HTableDescriptor) any(), (HRegionInfo) any(), (WALKey) any(),
      editCaptor.capture(), anyBoolean());
  // The second captured edit is the close event.
  WALEdit edit = editCaptor.getAllValues().get(1);
  assertNotNull(edit);
  assertNotNull(edit.getCells());
  assertEquals(1, edit.getCells().size());
  RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getCells().get(0));
  assertNotNull(desc);
  LOG.info("RegionEventDescriptor from WAL: " + desc);
  assertEquals(RegionEventDescriptor.EventType.REGION_CLOSE, desc.getEventType());
  assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName()));
  assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), hri.getEncodedNameAsBytes()));
  assertTrue(desc.getLogSequenceNumber() > 0);
  assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer()));
  assertEquals(2, desc.getStoresCount());
  StoreDescriptor store = desc.getStores(0);
  assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1));
  // JUnit convention: expected value first, actual second.
  assertEquals(Bytes.toString(fam1), store.getStoreHomeDir());
  assertEquals(0, store.getStoreFileCount());
  store = desc.getStores(1);
  assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2));
  assertEquals(Bytes.toString(fam2), store.getStoreHomeDir());
  assertEquals(0, store.getStoreFileCount());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies checkAndMutate never writes a cell with a timestamp older than the
 * one it replaces, even when the injected clock moves backwards.
 * @throws IOException on region operations
 */
@Test
public void testCheckAndMutateTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  edge.setValue(10);
  Put p = new Put(row);
  p.setDurability(Durability.SKIP_WAL);
  p.addColumn(fam1, qual1, qual1);
  region.put(p);
  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertNotNull(c);
  // JUnit convention: expected value first, actual second.
  assertEquals(10L, c.getTimestamp());
  // Move the clock BACKWARDS; the mutation must not be stamped with t=1.
  edge.setValue(1);
  p = new Put(row);
  p.setDurability(Durability.SKIP_WAL);
  p.addColumn(fam1, qual1, qual2);
  region.checkAndMutate(row, fam1, qual1, CompareOp.EQUAL, new BinaryComparator(qual1), p, false);
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  // Timestamp stays monotonic (clamped to the previous cell's timestamp) ...
  assertEquals(10L, c.getTimestamp());
  // ... while the new value is visible.
  assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(),
      qual2, 0, qual2.length));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test for HBASE-14229: Flushing canceled by coprocessor still leads to memstoreSize set down
 */
@Test
public void testMemstoreSizeWithFlushCanceling() throws IOException {
  FileSystem fs = FileSystem.get(CONF);
  Path rootDir = new Path(dir + "testMemstoreSizeWithFlushCanceling");
  FSHLog hLog = new FSHLog(fs, rootDir, "testMemstoreSizeWithFlushCanceling", CONF);
  HRegion region = initHRegion(tableName, null, null, name.getMethodName(), CONF, false,
      Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES);
  Store store = region.getStore(COLUMN_FAMILY_BYTES);
  assertEquals(0, region.getMemstoreSize());
  // Put one cell and remember its memstore footprint.
  byte[] value = Bytes.toBytes(name.getMethodName());
  Put put = new Put(value);
  put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
  region.put(put);
  long onePutSize = region.getMemstoreSize();
  assertTrue(onePutSize > 0);
  // A normal flush drains the memstore completely.
  region.flush(true);
  assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
  assertEquals("flushable size should be zero", 0, store.getFlushableSize());
  // Install a mocked coprocessor host whose preFlush returns a null scanner,
  // which cancels the flush for the store.
  RegionCoprocessorHost normalCPHost = region.getCoprocessorHost();
  RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
  when(mockedCPHost.preFlush(Mockito.isA(HStore.class), Mockito.isA(InternalScanner.class)))
      .thenReturn(null);
  region.setCoprocessorHost(mockedCPHost);
  region.put(put);
  region.flush(true);
  // The canceled flush must NOT have zeroed the accounting (the HBASE-14229 bug).
  assertEquals("memstoreSize should NOT be zero", onePutSize, region.getMemstoreSize());
  assertEquals("flushable size should NOT be zero", onePutSize, store.getFlushableSize());
  // Restore the real coprocessor host; the next flush drains everything.
  region.setCoprocessorHost(normalCPHost);
  region.flush(true);
  assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
  assertEquals("flushable size should be zero", 0, store.getFlushableSize());
  HBaseTestingUtility.closeRegionAndWAL(region);
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises per-cell TTL tags against a 10s family TTL: a cell expires at the
 * earlier of its own TTL tag or the family TTL, and increments may carry a TTL
 * that, once expired, re-exposes the previous value.
 * @throws IOException on region operations
 */
@Test
public void testCellTTLs() throws IOException {
  IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  final byte[] row = Bytes.toBytes("testRow");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] q3 = Bytes.toBytes("q3");
  final byte[] q4 = Bytes.toBytes("q4");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCellTTLs"));
  HColumnDescriptor hcd = new HColumnDescriptor(fam1);
  hcd.setTimeToLive(10); // family TTL: 10 seconds
  htd.addFamily(hcd);
  // Cell tags require the tag-capable HFile format version.
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
  HRegion region = HBaseTestingUtility.createRegionAndWAL(
      new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY,
          HConstants.EMPTY_BYTE_ARRAY),
      TEST_UTIL.getDataTestDir(), conf, htd);
  assertNotNull(region);
  try {
    long now = EnvironmentEdgeManager.currentTime();
    // q1: 5s cell TTL at t=now; q2: family TTL only at t=now;
    // q3/q4: same pair but timestamped almost 10s in the future.
    region.put(new Put(row).add(new KeyValue(row, fam1, q1, now, HConstants.EMPTY_BYTE_ARRAY,
        new ArrayBackedTag[] { new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
    region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
    region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1,
        HConstants.EMPTY_BYTE_ARRAY,
        new ArrayBackedTag[] { new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
    region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY));
    // Flush so TTL handling is exercised against store files, not just memstore.
    region.flush(true);
    Result r = region.get(new Get(row));
    assertNotNull(r.getValue(fam1, q1));
    assertNotNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // +5s: q1's cell TTL expires first.
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNotNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // +10s: q2 reaches the family TTL.
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNotNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // +15s: q3's cell TTL (relative to its later timestamp) expires.
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNull(r.getValue(fam1, q3));
    assertNotNull(r.getValue(fam1, q4));
    // +25s: q4 finally ages out via the family TTL.
    edge.incrementTime(10000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
    assertNull(r.getValue(fam1, q2));
    assertNull(r.getValue(fam1, q3));
    assertNull(r.getValue(fam1, q4));
    // Fun with disappearing increments: baseline counter value of 1.
    region.put(new Put(row).addColumn(fam1, q1, Bytes.toBytes(1L)));
    r = region.get(new Get(row));
    byte[] val = r.getValue(fam1, q1);
    assertNotNull(val);
    // JUnit convention: expected value first, actual second.
    assertEquals(1L, Bytes.toLong(val));
    // Increment whose resulting cell carries its own 5s TTL.
    Increment incr = new Increment(row).addColumn(fam1, q1, 1L);
    incr.setTTL(5000);
    region.increment(incr);
    r = region.get(new Get(row));
    val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(2L, Bytes.toLong(val));
    // +5s: the TTL'd increment expires, re-exposing the original value.
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    val = r.getValue(fam1, q1);
    assertNotNull(val);
    assertEquals(1L, Bytes.toLong(val));
    // +10s: the original counter cell also hits the family TTL.
    edge.incrementTime(5000);
    r = region.get(new Get(row));
    assertNull(r.getValue(fam1, q1));
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Wildcard (whole-family) scan served from flushed store files only;
 * max-versions must cap each qualifier at its newest versions in
 * newest-first order.
 * @throws IOException on put/flush/scan failure
 */
@Test
public void testScanner_Wildcard_FromFilesOnly_EnforceVersions() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] qf1 = Bytes.toBytes("qualifier1");
  byte[] qf2 = Bytes.toBytes("qualifier2");
  byte[] fam1 = Bytes.toBytes("fam1");
  long ts1 = 1;
  long ts2 = ts1 + 1;
  long ts3 = ts1 + 2;
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, fam1);
  try {
    // Three versions of two qualifiers, all flushed to a single store file.
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);
    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);
    Put put = new Put(row1);
    put.add(kv13);
    put.add(kv12);
    put.add(kv11);
    put.add(kv23);
    put.add(kv22);
    put.add(kv21);
    region.put(put);
    region.flush(true);
    // Expected: two newest versions per qualifier (presumably MAX_VERSIONS == 2
    // for this test class); the oldest version of each qualifier is dropped.
    List<Cell> expected = new ArrayList<Cell>();
    expected.add(kv13);
    expected.add(kv12);
    expected.add(kv23);
    expected.add(kv22);
    Scan scan = new Scan(row1);
    scan.addFamily(fam1);
    scan.setMaxVersions(MAX_VERSIONS);
    List<Cell> actual = new ArrayList<Cell>();
    InternalScanner scanner = region.getScanner(scan);
    boolean hasNext = scanner.next(actual);
    assertEquals(false, hasNext);
    // Compare ignoring mvcc, since the flush assigns sequence ids.
    for (int i = 0; i < expected.size(); i++) {
      assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i)));
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Loads four store files, each holding duplicate versions of ten rows, and
 * checks ROWCOL bloom filter entry counts before and after a major compaction:
 * total entries grow with duplicates but filter entries stay at the number of
 * unique row/column combinations.
 * @throws IOException on region operations
 */
@Test
public void testBloomFilterSize() throws IOException {
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] qf1 = Bytes.toBytes("col");
  byte[] val1 = Bytes.toBytes("value1");
  HColumnDescriptor hcd = new HColumnDescriptor(fam1)
      .setMaxVersions(Integer.MAX_VALUE)
      .setBloomFilterType(BloomType.ROWCOL);
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  this.region = TEST_UTIL.createLocalHRegion(info, htd);
  try {
    int numUniqueRows = 10;
    int duplicateMultiplier = 2;
    int numStoreFiles = 4;
    // Strictly increasing timestamps so every write is a distinct version.
    int version = 0;
    for (int f = 0; f < numStoreFiles; f++) {
      for (int i = 0; i < duplicateMultiplier; i++) {
        for (int j = 0; j < numUniqueRows; j++) {
          Put put = new Put(Bytes.toBytes("row" + j));
          put.setDurability(Durability.SKIP_WAL);
          long ts = version++;
          put.addColumn(fam1, qf1, ts, val1);
          region.put(put);
        }
      }
      region.flush(true);
    }
    // Before compaction: each file holds every version it saw, but the bloom
    // filter only counts unique row/col entries. (Parameterized collection —
    // the original raw Collection cannot be iterated as StoreFile.)
    HStore store = (HStore) region.getStore(fam1);
    Collection<StoreFile> storeFiles = store.getStorefiles();
    for (StoreFile storefile : storeFiles) {
      StoreFile.Reader reader = storefile.getReader();
      reader.loadFileInfo();
      reader.loadBloomfilter();
      assertEquals(numUniqueRows * duplicateMultiplier, reader.getEntries());
      assertEquals(numUniqueRows, reader.getFilterEntries());
    }
    // Major compaction merges everything into one file: entries accumulate,
    // filter entries still equal the unique row count.
    region.compact(true);
    storeFiles = store.getStorefiles();
    for (StoreFile storefile : storeFiles) {
      StoreFile.Reader reader = storefile.getReader();
      reader.loadFileInfo();
      reader.loadBloomfilter();
      assertEquals(numUniqueRows * duplicateMultiplier * numStoreFiles, reader.getEntries());
      assertEquals(numUniqueRows, reader.getFilterEntries());
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Checks that when a WAL sync failure aborts a put while a store snapshot is
 * being prepared, the rolled-back put leaves nothing behind in the flushable
 * size accounting.
 */
@Test(timeout = 60000)
public void testMemstoreSnapshotSize() throws IOException {
  // WAL whose sync() first snapshots the store, simulating a flush prepare
  // racing with a (possibly failing) sync.
  class MyFaultyFSLog extends FaultyFSLog {
    StoreFlushContext storeFlushCtx;
    public MyFaultyFSLog(FileSystem fs, Path rootDir, String logName, Configuration conf)
        throws IOException {
      super(fs, rootDir, logName, conf);
    }
    void setStoreFlushCtx(StoreFlushContext storeFlushCtx) {
      this.storeFlushCtx = storeFlushCtx;
    }
    @Override
    public void sync(long txid) throws IOException {
      // Move the active memstore into the snapshot before the sync runs.
      storeFlushCtx.prepare();
      super.sync(txid);
    }
  }
  FileSystem fs = FileSystem.get(CONF);
  Path rootDir = new Path(dir + "testMemstoreSnapshotSize");
  MyFaultyFSLog faultyLog = new MyFaultyFSLog(fs, rootDir, "testMemstoreSnapshotSize", CONF);
  HRegion region = initHRegion(tableName, null, null, name.getMethodName(), CONF, false,
      Durability.SYNC_WAL, faultyLog, COLUMN_FAMILY_BYTES);
  Store store = region.getStore(COLUMN_FAMILY_BYTES);
  byte[] value = Bytes.toBytes(name.getMethodName());
  faultyLog.setStoreFlushCtx(store.createFlushContext(12345));
  Put put = new Put(value);
  put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
  // Make the WAL sync fail so the put must be rolled back.
  faultyLog.setFailureType(FaultyFSLog.FailureType.SYNC);
  boolean threwIOE = false;
  try {
    region.put(put);
  } catch (IOException ioe) {
    threwIOE = true;
  } finally {
    assertTrue("The regionserver should have thrown an exception", threwIOE);
  }
  // After the rollback, nothing may remain accounted as flushable.
  long sz = store.getFlushableSize();
  assertTrue("flushable size should be zero, but it is " + sz, sz == 0);
  HBaseTestingUtility.closeRegionAndWAL(region);
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Puts with timestamps beyond the allowed slop must fail the sanity check with
 * SANITY_CHECK_FAILURE and must not trigger any WAL sync.
 * @throws Exception on region setup failure
 */
@Test
public void testBatchPutWithTsSlop() throws Exception {
  TableName b = TableName.valueOf(getName());
  byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
  byte[] qual = Bytes.toBytes("qual");
  byte[] val = Bytes.toBytes("val");
  // Set the timestamp slop on a private copy of the configuration so the
  // setting cannot leak into tests that share the class-wide CONF.
  Configuration conf = new Configuration(CONF);
  conf.setInt("hbase.hregion.keyvalue.timestamp.slop.millisecs", 1000);
  this.region = initHRegion(b, getName(), conf, cf);
  try {
    MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
    // Baseline WAL sync count; it must not change for rejected puts.
    long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source);
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
    // Timestamps absurdly far in the future — beyond any slop.
    final Put[] puts = new Put[10];
    for (int i = 0; i < 10; i++) {
      puts[i] = new Put(Bytes.toBytes("row_" + i), Long.MAX_VALUE - 100);
      puts[i].addColumn(cf, qual, val);
    }
    OperationStatus[] codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, codes[i].getOperationStatusCode());
    }
    // Nothing was written, so no sync happened.
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the special LATEST_TIMESTAMP marker on a put is replaced with the
 * server's actual timestamp before the cell becomes readable.
 */
@Test
public void testPutWithLatestTS() throws IOException {
  byte[] fam = Bytes.toBytes("info");
  byte[][] families = { fam };
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, families);
  try {
    byte[] qual = Bytes.toBytes("qual");
    // Same write-then-read check against two independent rows.
    for (String key : new String[] { "row1", "row2" }) {
      byte[] row = Bytes.toBytes(key);
      Put put = new Put(row);
      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));
      region.put(put);
      Get get = new Get(row).addColumn(fam, qual);
      Result result = region.get(get);
      assertEquals(1, result.size());
      Cell kv = result.rawCells()[0];
      LOG.info("Got: " + kv);
      assertTrue("LATEST_TIMESTAMP was not replaced with real timestamp",
          kv.getTimestamp() != HConstants.LATEST_TIMESTAMP);
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** Interleaves puts, column deletes, and a whole-row delete on one row (under an injected incrementing clock so every mutation gets a distinct timestamp) and verifies after each step that exactly the expected cells remain visible, including re-puts after deletion. */
@Test public void testDelete_mixed() throws IOException, InterruptedException { byte[] fam=Bytes.toBytes("info"); byte[][] families={fam}; String method=this.getName(); this.region=initHRegion(tableName,method,CONF,families); try { EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge()); byte[] row=Bytes.toBytes("table_name"); byte[] serverinfo=Bytes.toBytes("serverinfo"); byte[] splitA=Bytes.toBytes("splitA"); byte[] splitB=Bytes.toBytes("splitB"); Put put=new Put(row); put.addColumn(fam,splitA,Bytes.toBytes("reference_A")); region.put(put); put=new Put(row); put.addColumn(fam,splitB,Bytes.toBytes("reference_B")); region.put(put); put=new Put(row); put.addColumn(fam,serverinfo,Bytes.toBytes("ip_address")); region.put(put); Delete delete=new Delete(row); delete.addColumns(fam,splitA); region.delete(delete); Get get=new Get(row).addColumn(fam,serverinfo); Result result=region.get(get); assertEquals(1,result.size()); get=new Get(row).addColumn(fam,splitA); result=region.get(get); assertEquals(0,result.size()); get=new Get(row).addColumn(fam,splitB); result=region.get(get); assertEquals(1,result.size()); put=new Put(row); put.addColumn(fam,splitA,Bytes.toBytes("reference_A")); region.put(put); get=new Get(row); result=region.get(get); assertEquals(3,result.size()); delete=new Delete(row); region.delete(delete); assertEquals(0,region.get(get).size()); region.put(new Put(row).addColumn(fam,splitA,Bytes.toBytes("reference_A"))); result=region.get(get); assertEquals(1,result.size()); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** Puts three versions of two qualifiers into the memstore, then scans the family (wildcard columns) with MAX_VERSIONS and asserts only the newest versions come back, in newest-first order per qualifier — the oldest version (ts1) of each column must be dropped. */
@Test public void testScanner_Wildcard_FromMemStore_EnforceVersions() throws IOException { byte[] row1=Bytes.toBytes("row1"); byte[] qf1=Bytes.toBytes("qualifier1"); byte[] qf2=Bytes.toBytes("qualifier2"); byte[] fam1=Bytes.toBytes("fam1"); byte[][] families={fam1}; long ts1=System.currentTimeMillis(); long ts2=ts1 + 1; long ts3=ts1 + 2; String method=this.getName(); this.region=initHRegion(tableName,method,CONF,families); try { Put put=null; KeyValue kv13=new KeyValue(row1,fam1,qf1,ts3,KeyValue.Type.Put,null); KeyValue kv12=new KeyValue(row1,fam1,qf1,ts2,KeyValue.Type.Put,null); KeyValue kv11=new KeyValue(row1,fam1,qf1,ts1,KeyValue.Type.Put,null); KeyValue kv23=new KeyValue(row1,fam1,qf2,ts3,KeyValue.Type.Put,null); KeyValue kv22=new KeyValue(row1,fam1,qf2,ts2,KeyValue.Type.Put,null); KeyValue kv21=new KeyValue(row1,fam1,qf2,ts1,KeyValue.Type.Put,null); put=new Put(row1); put.add(kv13); put.add(kv12); put.add(kv11); put.add(kv23); put.add(kv22); put.add(kv21); region.put(put); List expected=new ArrayList(); expected.add(kv13); expected.add(kv12); expected.add(kv23); expected.add(kv22); Scan scan=new Scan(row1); scan.addFamily(fam1); scan.setMaxVersions(MAX_VERSIONS); List actual=new ArrayList(); InternalScanner scanner=region.getScanner(scan); boolean hasNext=scanner.next(actual); assertEquals(false,hasNext); for (int i=0; i < expected.size(); i++) { assertEquals(expected.get(i),actual.get(i)); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Reverse-scans a single-CF region whose data is entirely in the memstore, starting from the last row (rowC, which has two versions): rows must come back C, B, A, with hasNext turning false exactly at rowA. */
@Test(timeout=60000) public void testReverseScanner_FromMemStore_SingleCF_Normal() throws IOException { byte[] rowC=Bytes.toBytes("rowC"); byte[] rowA=Bytes.toBytes("rowA"); byte[] rowB=Bytes.toBytes("rowB"); byte[] cf=Bytes.toBytes("CF"); byte[][] families={cf}; byte[] col=Bytes.toBytes("C"); long ts=1; String method=this.getName(); this.region=initHRegion(tableName,method,families); try { KeyValue kv1=new KeyValue(rowC,cf,col,ts,KeyValue.Type.Put,null); KeyValue kv11=new KeyValue(rowC,cf,col,ts + 1,KeyValue.Type.Put,null); KeyValue kv2=new KeyValue(rowA,cf,col,ts,KeyValue.Type.Put,null); KeyValue kv3=new KeyValue(rowB,cf,col,ts,KeyValue.Type.Put,null); Put put=null; put=new Put(rowC); put.add(kv1); put.add(kv11); region.put(put); put=new Put(rowA); put.add(kv2); region.put(put); put=new Put(rowB); put.add(kv3); region.put(put); Scan scan=new Scan(rowC); scan.setMaxVersions(5); scan.setReversed(true); InternalScanner scanner=region.getScanner(scan); List currRow=new ArrayList(); boolean hasNext=scanner.next(currRow); assertEquals(2,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),rowC,0,rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(1,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),rowB,0,rowB.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(1,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),rowA,0,rowA.length)); assertFalse(hasNext); scanner.close(); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Reverse scan over three column families whose cells are deliberately spread across three flushed HFiles plus the live memstore (compaction disabled via a huge compactionThreshold so the file layout is preserved). With batch=3 the scanner must still return rows in strict reverse order (row4..row0), splitting wide rows across next() calls exactly as asserted below. The put/flush ordering is the point of the test — do not reorder. */
@Test(timeout=60000) public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() throws IOException { byte[] row0=Bytes.toBytes("row0"); byte[] row1=Bytes.toBytes("row1"); byte[] row2=Bytes.toBytes("row2"); byte[] row3=Bytes.toBytes("row3"); byte[] row4=Bytes.toBytes("row4"); byte[] row5=Bytes.toBytes("row5"); byte[] cf1=Bytes.toBytes("CF1"); byte[] cf2=Bytes.toBytes("CF2"); byte[] cf3=Bytes.toBytes("CF3"); byte[][] families={cf1,cf2,cf3}; byte[] col=Bytes.toBytes("C"); long ts=1; String method=this.getName(); HBaseConfiguration conf=new HBaseConfiguration(); conf.setInt("hbase.hstore.compactionThreshold",10000); this.region=initHRegion(tableName,method,conf,families); try { KeyValue kv0_1_1=new KeyValue(row0,cf1,col,ts,KeyValue.Type.Put,null); KeyValue kv1_2_1=new KeyValue(row1,cf2,col,ts,KeyValue.Type.Put,null); KeyValue kv1_2_2=new KeyValue(row1,cf1,col,ts + 1,KeyValue.Type.Put,null); KeyValue kv2_4_1=new KeyValue(row2,cf2,col,ts,KeyValue.Type.Put,null); KeyValue kv2_4_2=new KeyValue(row2,cf1,col,ts,KeyValue.Type.Put,null); KeyValue kv2_4_3=new KeyValue(row2,cf3,col,ts,KeyValue.Type.Put,null); KeyValue kv2_4_4=new KeyValue(row2,cf1,col,ts + 4,KeyValue.Type.Put,null); KeyValue kv3_2_1=new KeyValue(row3,cf2,col,ts,KeyValue.Type.Put,null); KeyValue kv3_2_2=new KeyValue(row3,cf1,col,ts + 4,KeyValue.Type.Put,null); KeyValue kv4_5_1=new KeyValue(row4,cf1,col,ts,KeyValue.Type.Put,null); KeyValue kv4_5_2=new KeyValue(row4,cf3,col,ts,KeyValue.Type.Put,null); KeyValue kv4_5_3=new KeyValue(row4,cf3,col,ts + 5,KeyValue.Type.Put,null); KeyValue kv4_5_4=new KeyValue(row4,cf2,col,ts,KeyValue.Type.Put,null); KeyValue kv4_5_5=new KeyValue(row4,cf1,col,ts + 3,KeyValue.Type.Put,null); KeyValue kv5_2_1=new KeyValue(row5,cf2,col,ts,KeyValue.Type.Put,null); KeyValue kv5_2_2=new KeyValue(row5,cf3,col,ts,KeyValue.Type.Put,null); Put put=null; put=new Put(row1); put.add(kv1_2_1); region.put(put); put=new Put(row2); put.add(kv2_4_1); region.put(put); put=new Put(row4); 
put.add(kv4_5_4); put.add(kv4_5_5); region.put(put); region.flush(true); put=new Put(row4); put.add(kv4_5_1); put.add(kv4_5_3); region.put(put); put=new Put(row1); put.add(kv1_2_2); region.put(put); put=new Put(row2); put.add(kv2_4_4); region.put(put); region.flush(true); put=new Put(row4); put.add(kv4_5_2); region.put(put); put=new Put(row2); put.add(kv2_4_2); put.add(kv2_4_3); region.put(put); put=new Put(row3); put.add(kv3_2_2); region.put(put); region.flush(true); put=new Put(row0); put.add(kv0_1_1); region.put(put); put=new Put(row3); put.add(kv3_2_1); region.put(put); put=new Put(row5); put.add(kv5_2_1); put.add(kv5_2_2); region.put(put); Scan scan=new Scan(row4); scan.setMaxVersions(5); scan.setBatch(3); scan.setReversed(true); InternalScanner scanner=region.getScanner(scan); List currRow=new ArrayList(); boolean hasNext=false; hasNext=scanner.next(currRow); assertEquals(3,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),row4,0,row4.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(2,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),row4,0,row4.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(2,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),row3,0,row3.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(3,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),row2,0,row2.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(1,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),row2,0,row2.length)); 
assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(2,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),row1,0,row1.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(1,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),row0,0,row0.length)); assertFalse(hasNext); scanner.close(); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** Opens a secondary replica over a primary's flushed data, then removes the primary's store files for the first family (simulating compaction cleanup) and verifies the secondary can still read all 1000 rows — i.e. the secondary's already-open readers are not invalidated by file removal on the primary. */
@Test public void testCompactionFromPrimary() throws IOException { Path rootDir=new Path(dir + "testRegionReplicaSecondary"); FSUtils.setRootDir(TEST_UTIL.getConfiguration(),rootDir); byte[][] families=new byte[][]{Bytes.toBytes("cf1"),Bytes.toBytes("cf2"),Bytes.toBytes("cf3")}; byte[] cq=Bytes.toBytes("cq"); HTableDescriptor htd=new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary")); for ( byte[] family : families) { htd.addFamily(new HColumnDescriptor(family)); } long time=System.currentTimeMillis(); HRegionInfo primaryHri=new HRegionInfo(htd.getTableName(),HConstants.EMPTY_START_ROW,HConstants.EMPTY_END_ROW,false,time,0); HRegionInfo secondaryHri=new HRegionInfo(htd.getTableName(),HConstants.EMPTY_START_ROW,HConstants.EMPTY_END_ROW,false,time,1); HRegion primaryRegion=null, secondaryRegion=null; try { primaryRegion=HBaseTestingUtility.createRegionAndWAL(primaryHri,rootDir,TEST_UTIL.getConfiguration(),htd); putData(primaryRegion,0,1000,cq,families); primaryRegion.flush(true); secondaryRegion=HRegion.openHRegion(rootDir,secondaryHri,htd,null,CONF); Collection storeFiles=primaryRegion.getStore(families[0]).getStorefiles(); primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]),storeFiles); Collection storeFileInfos=primaryRegion.getRegionFileSystem().getStoreFiles(families[0]); Assert.assertTrue(storeFileInfos == null || storeFileInfos.size() == 0); verifyData(secondaryRegion,0,1000,cq,families); } finally { if (primaryRegion != null) { HBaseTestingUtility.closeRegionAndWAL(primaryRegion); } if (secondaryRegion != null) { HBaseTestingUtility.closeRegionAndWAL(secondaryRegion); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Exercises the reverse-scan "moreRowsMayExistAfter" boundary: scanning [rowD..rowA) reversed over flushed data must yield D, C, B and then report no-more-rows (the exclusive stop row rowA is never returned); a second reverse scan on col2 — present only on rowD — must stop after that single match. */
@Test(timeout=60000) public void testReverseScanner_moreRowsMayExistAfter() throws IOException { byte[] rowA=Bytes.toBytes("rowA"); byte[] rowB=Bytes.toBytes("rowB"); byte[] rowC=Bytes.toBytes("rowC"); byte[] rowD=Bytes.toBytes("rowD"); byte[] rowE=Bytes.toBytes("rowE"); byte[] cf=Bytes.toBytes("CF"); byte[][] families={cf}; byte[] col1=Bytes.toBytes("col1"); byte[] col2=Bytes.toBytes("col2"); long ts=1; String method=this.getName(); this.region=initHRegion(tableName,method,families); try { KeyValue kv1=new KeyValue(rowA,cf,col1,ts,KeyValue.Type.Put,null); KeyValue kv2=new KeyValue(rowB,cf,col1,ts,KeyValue.Type.Put,null); KeyValue kv3=new KeyValue(rowC,cf,col1,ts,KeyValue.Type.Put,null); KeyValue kv4_1=new KeyValue(rowD,cf,col1,ts,KeyValue.Type.Put,null); KeyValue kv4_2=new KeyValue(rowD,cf,col2,ts,KeyValue.Type.Put,null); KeyValue kv5=new KeyValue(rowE,cf,col1,ts,KeyValue.Type.Put,null); Put put=null; put=new Put(rowA); put.add(kv1); region.put(put); put=new Put(rowB); put.add(kv2); region.put(put); put=new Put(rowC); put.add(kv3); region.put(put); put=new Put(rowD); put.add(kv4_1); region.put(put); put=new Put(rowD); put.add(kv4_2); region.put(put); put=new Put(rowE); put.add(kv5); region.put(put); region.flush(true); Scan scan=new Scan(rowD,rowA); scan.addColumn(families[0],col1); scan.setReversed(true); List currRow=new ArrayList(); InternalScanner scanner=region.getScanner(scan); boolean hasNext=scanner.next(currRow); assertEquals(1,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),rowD,0,rowD.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(1,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),rowC,0,rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(1,currRow.size()); 
assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),rowB,0,rowB.length)); assertFalse(hasNext); scanner.close(); scan=new Scan(rowD,rowA); scan.addColumn(families[0],col2); scan.setReversed(true); currRow.clear(); scanner=region.getScanner(scan); hasNext=scanner.next(currRow); assertEquals(1,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),rowD,0,rowD.length)); scanner.close(); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies delete-timestamp semantics against a cell stamped far in the future:
 * a row delete stamped "now" must not mask the future-stamped cell, while a
 * delete stamped later than the cell's timestamp must remove it.
 */
@Test public void testDeleteRowWithFutureTs() throws IOException {
  byte[] infoFam = Bytes.toBytes("info");
  byte[][] colFamilies = { infoFam };
  String testName = this.getName();
  this.region = initHRegion(tableName, testName, CONF, colFamilies);
  try {
    byte[] rowKey = Bytes.toBytes("table_name");
    byte[] serverCol = Bytes.toBytes("serverinfo");

    // Write a cell whose timestamp lies far in the future.
    Put futurePut = new Put(rowKey);
    futurePut.addColumn(infoFam, serverCol, HConstants.LATEST_TIMESTAMP - 5, Bytes.toBytes("value"));
    region.put(futurePut);

    // A row delete stamped with the current time must not cover the future cell.
    region.delete(new Delete(rowKey));
    Result fetched = region.get(new Get(rowKey).addColumn(infoFam, serverCol));
    assertEquals(1, fetched.size());

    // A row delete stamped after the cell's timestamp must remove it.
    region.delete(new Delete(rowKey, HConstants.LATEST_TIMESTAMP - 3));
    fetched = region.get(new Get(rowKey).addColumn(infoFam, serverCol));
    assertEquals(0, fetched.size());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests that checkAndMutate fails when the comparator value does not match
 * the stored cell, for both the checkAndPut and checkAndDelete paths.
 * <p>
 * Fix: the original built a {@code Delete} (with {@code addFamily}) but then
 * passed {@code put} to the second checkAndMutate, leaving the Delete unused
 * and the delete path untested; the second call now submits the Delete.
 */
@Test public void testCheckAndMutate_WithWrongValue() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] qf1 = Bytes.toBytes("qualifier");
  byte[] val1 = Bytes.toBytes("value1");
  byte[] val2 = Bytes.toBytes("value2");
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, fam1);
  try {
    // Seed the cell with val1 so comparing against val2 must fail.
    Put put = new Put(row1);
    put.addColumn(fam1, qf1, val1);
    region.put(put);

    // checkAndPut with the wrong expected value: mutation must be rejected.
    boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
        new BinaryComparator(val2), put, true);
    assertEquals(false, res);

    // checkAndDelete with the wrong expected value: delete must be rejected.
    Delete delete = new Delete(row1);
    delete.addFamily(fam1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
        new BinaryComparator(val2), delete, true);
    assertEquals(false, res);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

BranchVerifier InternalCallVerifier BooleanVerifier 
/** Races concurrent Get threads against region close: half the getter threads start before closing is flagged, half after, the region is closed while they run, and each thread's captured exception is checked — any failure is tolerated except a NullPointerException, which would indicate close left the region in a partially-torn-down state. NOTE(review): the region is closed in the inner finally and closeRegionAndWAL is invoked again in the outer finally — presumably idempotent on an already-closed region; confirm before restructuring. */
@Test public void testGetWhileRegionClose() throws IOException { TableName tableName=TableName.valueOf(name.getMethodName()); Configuration hc=initSplit(); int numRows=100; byte[][] families={fam1,fam2,fam3}; String method=name.getMethodName(); this.region=initHRegion(tableName,method,hc,families); try { final int startRow=100; putData(startRow,numRows,qual1,families); putData(startRow,numRows,qual2,families); putData(startRow,numRows,qual3,families); final AtomicBoolean done=new AtomicBoolean(false); final AtomicInteger gets=new AtomicInteger(0); GetTillDoneOrException[] threads=new GetTillDoneOrException[10]; try { for (int i=0; i < threads.length / 2; i++) { threads[i]=new GetTillDoneOrException(i,Bytes.toBytes("" + startRow),done,gets); threads[i].setDaemon(true); threads[i].start(); } this.region.closing.set(true); for (int i=threads.length / 2; i < threads.length; i++) { threads[i]=new GetTillDoneOrException(i,Bytes.toBytes("" + startRow),done,gets); threads[i].setDaemon(true); threads[i].start(); } } finally { if (this.region != null) { HBaseTestingUtility.closeRegionAndWAL(this.region); } } done.set(true); for ( GetTillDoneOrException t : threads) { try { t.join(); } catch ( InterruptedException e) { e.printStackTrace(); } if (t.e != null) { LOG.info("Exception=" + t.e); assertFalse("Found a NPE in " + t.getName(),t.e instanceof NullPointerException); } } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** Walks checkAndMutate through the empty-value edge cases: an empty stored value matches both an empty BinaryComparator and (once the cell is deleted) a NullComparator, while a non-empty stored value must fail the empty-value check. Each assertion depends on the state left by the previous mutation — order matters. */
@Test public void testCheckAndMutate_WithEmptyRowValue() throws IOException { byte[] row1=Bytes.toBytes("row1"); byte[] fam1=Bytes.toBytes("fam1"); byte[] qf1=Bytes.toBytes("qualifier"); byte[] emptyVal=new byte[]{}; byte[] val1=Bytes.toBytes("value1"); byte[] val2=Bytes.toBytes("value2"); String method=this.getName(); this.region=initHRegion(tableName,method,CONF,fam1); try { Put put=new Put(row1); put.addColumn(fam1,qf1,emptyVal); boolean res=region.checkAndMutate(row1,fam1,qf1,CompareOp.EQUAL,new BinaryComparator(emptyVal),put,true); assertTrue(res); put=new Put(row1); put.addColumn(fam1,qf1,val1); res=region.checkAndMutate(row1,fam1,qf1,CompareOp.EQUAL,new BinaryComparator(emptyVal),put,true); assertTrue(res); res=region.checkAndMutate(row1,fam1,qf1,CompareOp.EQUAL,new BinaryComparator(emptyVal),put,true); assertFalse(res); Delete delete=new Delete(row1); delete.addColumn(fam1,qf1); res=region.checkAndMutate(row1,fam1,qf1,CompareOp.EQUAL,new BinaryComparator(emptyVal),delete,true); assertFalse(res); put=new Put(row1); put.addColumn(fam1,qf1,val2); res=region.checkAndMutate(row1,fam1,qf1,CompareOp.EQUAL,new BinaryComparator(val1),put,true); assertTrue(res); delete=new Delete(row1); delete.addColumn(fam1,qf1); delete.addColumn(fam1,qf1); res=region.checkAndMutate(row1,fam1,qf1,CompareOp.EQUAL,new BinaryComparator(val2),delete,true); assertTrue(res); delete=new Delete(row1); res=region.checkAndMutate(row1,fam1,qf1,CompareOp.EQUAL,new BinaryComparator(emptyVal),delete,true); assertTrue(res); put=new Put(row1); put.addColumn(fam1,qf1,val1); res=region.checkAndMutate(row1,fam1,qf1,CompareOp.EQUAL,new NullComparator(),put,true); assertTrue(res); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** Writes recovered-edits files with sequence ids 1000..1050 (step 10), replays them with every store's max-flushed seqId set to minSeqId-1 (so none are skipped), and verifies the replay returns maxSeqId and that every replayed edit is readable back from the region. */
@Test public void testSkipRecoveredEditsReplay() throws Exception { String method="testSkipRecoveredEditsReplay"; TableName tableName=TableName.valueOf(method); byte[] family=Bytes.toBytes("family"); this.region=initHRegion(tableName,method,CONF,family); final WALFactory wals=new WALFactory(CONF,null,method); try { Path regiondir=region.getRegionFileSystem().getRegionDir(); FileSystem fs=region.getRegionFileSystem().getFileSystem(); byte[] regionName=region.getRegionInfo().getEncodedNameAsBytes(); Path recoveredEditsDir=WALSplitter.getRegionDirRecoveredEditsDir(regiondir); long maxSeqId=1050; long minSeqId=1000; for (long i=minSeqId; i <= maxSeqId; i+=10) { Path recoveredEdits=new Path(recoveredEditsDir,String.format("%019d",i)); fs.create(recoveredEdits); WALProvider.Writer writer=wals.createRecoveredEditsWriter(fs,recoveredEdits); long time=System.nanoTime(); WALEdit edit=new WALEdit(); edit.add(new KeyValue(row,family,Bytes.toBytes(i),time,KeyValue.Type.Put,Bytes.toBytes(i))); writer.append(new WAL.Entry(new HLogKey(regionName,tableName,i,time,HConstants.DEFAULT_CLUSTER_ID),edit)); writer.close(); } MonitoredTask status=TaskMonitor.get().createStatus(method); Map maxSeqIdInStores=new TreeMap(Bytes.BYTES_COMPARATOR); for ( Store store : region.getStores()) { maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),minSeqId - 1); } long seqId=region.replayRecoveredEditsIfAny(regiondir,maxSeqIdInStores,null,status); assertEquals(maxSeqId,seqId); region.getMVCC().advanceTo(seqId); Get get=new Get(row); Result result=region.get(get); for (long i=minSeqId; i <= maxSeqId; i+=10) { List kvs=result.getColumnCells(family,Bytes.toBytes(i)); assertEquals(1,kvs.size()); assertArrayEquals(Bytes.toBytes(i),CellUtil.cloneValue(kvs.get(0))); } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; wals.close(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** Writes recovered-edits files where the highest-seqId entry is a compaction marker rather than a normal edit, replays from recoverSeqId=1030, and asserts the returned seqId still reaches maxSeqId and exactly one store file results from the replayed flush. NOTE(review): despite the method name, the last file's compaction marker is appended like any other entry — "ignored" presumably refers to how replay treats it; confirm against HRegion.replayRecoveredEditsIfAny. */
@Test public void testSkipRecoveredEditsReplayTheLastFileIgnored() throws Exception { String method="testSkipRecoveredEditsReplayTheLastFileIgnored"; TableName tableName=TableName.valueOf(method); byte[] family=Bytes.toBytes("family"); this.region=initHRegion(tableName,method,CONF,family); final WALFactory wals=new WALFactory(CONF,null,method); try { Path regiondir=region.getRegionFileSystem().getRegionDir(); FileSystem fs=region.getRegionFileSystem().getFileSystem(); byte[] regionName=region.getRegionInfo().getEncodedNameAsBytes(); byte[][] columns=region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]); assertEquals(0,region.getStoreFileList(columns).size()); Path recoveredEditsDir=WALSplitter.getRegionDirRecoveredEditsDir(regiondir); long maxSeqId=1050; long minSeqId=1000; for (long i=minSeqId; i <= maxSeqId; i+=10) { Path recoveredEdits=new Path(recoveredEditsDir,String.format("%019d",i)); fs.create(recoveredEdits); WALProvider.Writer writer=wals.createRecoveredEditsWriter(fs,recoveredEdits); long time=System.nanoTime(); WALEdit edit=null; if (i == maxSeqId) { edit=WALEdit.createCompaction(region.getRegionInfo(),CompactionDescriptor.newBuilder().setTableName(ByteString.copyFrom(tableName.getName())).setFamilyName(ByteString.copyFrom(regionName)).setEncodedRegionName(ByteString.copyFrom(regionName)).setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString()))).setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName())).build()); } else { edit=new WALEdit(); edit.add(new KeyValue(row,family,Bytes.toBytes(i),time,KeyValue.Type.Put,Bytes.toBytes(i))); } writer.append(new WAL.Entry(new HLogKey(regionName,tableName,i,time,HConstants.DEFAULT_CLUSTER_ID),edit)); writer.close(); } long recoverSeqId=1030; Map maxSeqIdInStores=new TreeMap(Bytes.BYTES_COMPARATOR); MonitoredTask status=TaskMonitor.get().createStatus(method); for ( Store store : region.getStores()) { 
maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),recoverSeqId - 1); } long seqId=region.replayRecoveredEditsIfAny(regiondir,maxSeqIdInStores,null,status); assertEquals(maxSeqId,seqId); assertEquals(1,region.getStoreFileList(columns).size()); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; wals.close(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// NOTE(review): this test nulls this.region without calling closeRegionAndWAL —
// presumably teardown elsewhere handles it; confirm before relying on cleanup here.
/** * Test case to check append function with memstore flushing * @throws Exception */ @Test public void testParallelAppendWithMemStoreFlush() throws Exception { byte[] family=Appender.family; this.region=initHRegion(tableName,method,CONF,family); final HRegion region=this.region; final AtomicBoolean appendDone=new AtomicBoolean(false); Runnable flusher=new Runnable(){ @Override public void run(){ while (!appendDone.get()) { try { region.flush(true); } catch ( Exception e) { e.printStackTrace(); } } } } ; int threadNum=20; int appendCounter=100; byte[] expected=new byte[threadNum * appendCounter]; for (int i=0; i < threadNum * appendCounter; i++) { System.arraycopy(Appender.CHAR,0,expected,i,1); } Thread[] appenders=new Thread[threadNum]; Thread flushThread=new Thread(flusher); for (int i=0; i < threadNum; i++) { appenders[i]=new Thread(new Appender(this.region,appendCounter)); appenders[i].start(); } flushThread.start(); for (int i=0; i < threadNum; i++) { appenders[i].join(); } appendDone.set(true); flushThread.join(); Get get=new Get(Appender.appendRow); get.addColumn(Appender.family,Appender.qualifier); get.setMaxVersions(1); Result res=this.region.get(get); List kvs=res.getColumnCells(Appender.family,Appender.qualifier); assertEquals(kvs.size(),1); Cell kv=kvs.get(0); byte[] appendResult=new byte[kv.getValueLength()]; System.arraycopy(kv.getValueArray(),kv.getValueOffset(),appendResult,0,kv.getValueLength()); assertArrayEquals(expected,appendResult); this.region=null; }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** Regression test for stop-row handling: a scan over [row333,row444) that selects only col1 — which no row in that range carries — must terminate immediately with an empty result instead of running past the stop row. */
@Test public void testScanner_StopRow1542() throws IOException { byte[] family=Bytes.toBytes("testFamily"); this.region=initHRegion(tableName,getName(),CONF,family); try { byte[] row1=Bytes.toBytes("row111"); byte[] row2=Bytes.toBytes("row222"); byte[] row3=Bytes.toBytes("row333"); byte[] row4=Bytes.toBytes("row444"); byte[] row5=Bytes.toBytes("row555"); byte[] col1=Bytes.toBytes("Pub111"); byte[] col2=Bytes.toBytes("Pub222"); Put put=new Put(row1); put.addColumn(family,col1,Bytes.toBytes(10L)); region.put(put); put=new Put(row2); put.addColumn(family,col1,Bytes.toBytes(15L)); region.put(put); put=new Put(row3); put.addColumn(family,col2,Bytes.toBytes(20L)); region.put(put); put=new Put(row4); put.addColumn(family,col2,Bytes.toBytes(30L)); region.put(put); put=new Put(row5); put.addColumn(family,col1,Bytes.toBytes(40L)); region.put(put); Scan scan=new Scan(row3,row4); scan.setMaxVersions(); scan.addColumn(family,col1); InternalScanner s=region.getScanner(scan); List results=new ArrayList(); assertEquals(false,s.next(results)); assertEquals(0,results.size()); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** Splits a region at splitRow and runs reverse scans against both daughters with start keys beyond, at, and below their data ranges, verifying each scan walks rows strictly downward to the daughter's first row and stops there — reverse scans must respect daughter-region boundaries after a split. */
@Test(timeout=60000) public void testSplitRegionWithReverseScan() throws IOException { TableName tableName=TableName.valueOf("testSplitRegionWithReverseScan"); byte[] qualifier=Bytes.toBytes("qualifier"); Configuration hc=initSplit(); int numRows=3; byte[][] families={fam1}; String method=this.getName(); this.region=initHRegion(tableName,method,hc,families); int startRow=100; putData(startRow,numRows,qualifier,families); int splitRow=startRow + numRows; putData(splitRow,numRows,qualifier,families); region.flush(true); HRegion[] regions=null; try { regions=splitRegion(region,Bytes.toBytes("" + splitRow)); for (int i=0; i < regions.length; i++) { regions[i]=HRegion.openHRegion(regions[i],null); } assertEquals(2,regions.length); verifyData(regions[0],startRow,numRows,qualifier,families); verifyData(regions[1],splitRow,numRows,qualifier,families); Scan scan=new Scan(Bytes.toBytes(String.valueOf(startRow + 10 * numRows))); scan.setReversed(true); InternalScanner scanner=regions[1].getScanner(scan); List currRow=new ArrayList(); boolean more=false; int verify=startRow + 2 * numRows - 1; do { more=scanner.next(currRow); assertEquals(Bytes.toString(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength()),verify + ""); verify--; currRow.clear(); } while (more); assertEquals(verify,startRow + numRows - 1); scanner.close(); scan=new Scan(Bytes.toBytes(String.valueOf(startRow + 2 * numRows - 1))); scan.setReversed(true); scanner=regions[1].getScanner(scan); verify=startRow + 2 * numRows - 1; do { more=scanner.next(currRow); assertEquals(Bytes.toString(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength()),verify + ""); verify--; currRow.clear(); } while (more); assertEquals(verify,startRow + numRows - 1); scanner.close(); scan=new Scan(Bytes.toBytes(String.valueOf(startRow + numRows))); scan.setReversed(true); scanner=regions[0].getScanner(scan); verify=startRow + numRows - 1; do { more=scanner.next(currRow); 
assertEquals(Bytes.toString(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength()),verify + ""); verify--; currRow.clear(); } while (more); assertEquals(verify,99); scanner.close(); scan=new Scan(Bytes.toBytes(String.valueOf(startRow + numRows - 1))); scan.setReversed(true); scanner=regions[0].getScanner(scan); verify=startRow + numRows - 1; do { more=scanner.next(currRow); assertEquals(Bytes.toString(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength()),verify + ""); verify--; currRow.clear(); } while (more); assertEquals(verify,startRow - 1); scanner.close(); } finally { this.region.close(); this.region=null; } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Like the "Normal" reverse-scan memstore test, but seeks from rowD — a key larger than any stored row — so the scanner must first position itself at the largest existing row (rowC) and then walk C, B, A. */
@Test(timeout=60000) public void testReverseScanner_FromMemStore_SingleCF_LargerKey() throws IOException { byte[] rowC=Bytes.toBytes("rowC"); byte[] rowA=Bytes.toBytes("rowA"); byte[] rowB=Bytes.toBytes("rowB"); byte[] rowD=Bytes.toBytes("rowD"); byte[] cf=Bytes.toBytes("CF"); byte[][] families={cf}; byte[] col=Bytes.toBytes("C"); long ts=1; String method=this.getName(); this.region=initHRegion(tableName,method,families); try { KeyValue kv1=new KeyValue(rowC,cf,col,ts,KeyValue.Type.Put,null); KeyValue kv11=new KeyValue(rowC,cf,col,ts + 1,KeyValue.Type.Put,null); KeyValue kv2=new KeyValue(rowA,cf,col,ts,KeyValue.Type.Put,null); KeyValue kv3=new KeyValue(rowB,cf,col,ts,KeyValue.Type.Put,null); Put put=null; put=new Put(rowC); put.add(kv1); put.add(kv11); region.put(put); put=new Put(rowA); put.add(kv2); region.put(put); put=new Put(rowB); put.add(kv3); region.put(put); Scan scan=new Scan(rowD); List currRow=new ArrayList(); scan.setReversed(true); scan.setMaxVersions(5); InternalScanner scanner=region.getScanner(scan); boolean hasNext=scanner.next(currRow); assertEquals(2,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),rowC,0,rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(1,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),rowB,0,rowB.length)); assertTrue(hasNext); currRow.clear(); hasNext=scanner.next(currRow); assertEquals(1,currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(),currRow.get(0).getRowOffset(),currRow.get(0).getRowLength(),rowA,0,rowA.length)); assertFalse(hasNext); scanner.close(); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Wildcard scan (no explicit columns) across three flushed store files plus
 * the memstore with maxVersions=3: for each qualifier only the three newest
 * versions (ts4..ts2) may come back; the oldest (ts1, still in the memstore)
 * must be dropped by version enforcement.
 */
@Test
public void testScanner_Wildcard_FromMemStoreAndFiles_EnforceVersions() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] qf1 = Bytes.toBytes("qualifier1");
  // NOTE(review): "quateslifier2" looks like a typo for "qualifier2", but it is
  // used consistently for puts and expectations so the test is still valid;
  // kept byte-for-byte to avoid changing runtime data.
  byte[] qf2 = Bytes.toBytes("quateslifier2");
  long ts1 = 1;
  long ts2 = ts1 + 1;
  long ts3 = ts1 + 2;
  long ts4 = ts1 + 3;
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, fam1);
  try {
    KeyValue kv14 = new KeyValue(row1, fam1, qf1, ts4, KeyValue.Type.Put, null);
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);
    KeyValue kv24 = new KeyValue(row1, fam1, qf2, ts4, KeyValue.Type.Put, null);
    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);
    // Spread the versions over three store files plus the memstore.
    Put put = new Put(row1);
    put.add(kv14);
    put.add(kv24);
    region.put(put);
    region.flush(true);
    put = new Put(row1);
    put.add(kv23);
    put.add(kv13);
    region.put(put);
    region.flush(true);
    put = new Put(row1);
    put.add(kv22);
    put.add(kv12);
    region.put(put);
    region.flush(true);
    put = new Put(row1);
    put.add(kv21);
    put.add(kv11);
    region.put(put); // ts1 versions stay in the memstore only
    // FIX: raw List elements (Object) cannot be passed to
    // CellUtil.equalsIgnoreMvccVersion(Cell, Cell); use List<Cell>.
    List<Cell> expected = new ArrayList<Cell>();
    expected.add(kv14);
    expected.add(kv13);
    expected.add(kv12);
    expected.add(kv24);
    expected.add(kv23);
    expected.add(kv22);
    Scan scan = new Scan(row1);
    int versions = 3;
    scan.setMaxVersions(versions);
    List<Cell> actual = new ArrayList<Cell>();
    InternalScanner scanner = region.getScanner(scan);
    boolean hasNext = scanner.next(actual);
    // Single row: one next() call drains the scanner.
    assertEquals(false, hasNext);
    for (int i = 0; i < expected.size(); i++) {
      assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i)));
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * batchMutate on a region with key range [a, c): the Put for row "c" (the
 * exclusive end key) must fail the sanity check while the in-range mutations
 * succeed. Then, with the rowlock timeout shortened to 1s, a batch blocked on
 * a row lock held ~5s by another thread must still complete once the lock is
 * released (batchMutate retries rather than failing permanently).
 */
@Test(timeout = 60000)
public void testBatchMutateWithWrongRegionException() throws Exception {
  final byte[] a = Bytes.toBytes("a");
  final byte[] b = Bytes.toBytes("b");
  final byte[] c = Bytes.toBytes("c"); // exclusive end key of the region
  int prevLockTimeout = CONF.getInt("hbase.rowlock.wait.duration", 30000);
  CONF.setInt("hbase.rowlock.wait.duration", 1000);
  final HRegion region = initHRegion(tableName, a, c, name.getMethodName(), CONF, false, fam1);
  Mutation[] mutations = new Mutation[] {
      new Put(a).addImmutable(fam1, null, null),
      new Put(c).addImmutable(fam1, null, null), // out of range -> sanity check failure
      new Put(b).addImmutable(fam1, null, null) };
  OperationStatus[] status = region.batchMutate(mutations);
  assertEquals(status[0].getOperationStatusCode(), OperationStatusCode.SUCCESS);
  assertEquals(status[1].getOperationStatusCode(), OperationStatusCode.SANITY_CHECK_FAILURE);
  assertEquals(status[2].getOperationStatusCode(), OperationStatusCode.SUCCESS);
  final CountDownLatch obtainedRowLock = new CountDownLatch(1);
  ExecutorService exec = Executors.newFixedThreadPool(2);
  try {
    // FIX: raw Callable/Future replaced with Callable<Void>/Future<Void>.
    Future<Void> f1 = exec.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        LOG.info("Acquiring row lock");
        RowLock rl = region.getRowLock(b);
        obtainedRowLock.countDown();
        LOG.info("Waiting for 5 seconds before releasing lock");
        Threads.sleep(5000);
        LOG.info("Releasing row lock");
        rl.release();
        return null;
      }
    });
    obtainedRowLock.await(30, TimeUnit.SECONDS);
    Future<Void> f2 = exec.submit(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        // Both rows are in range; the batch must succeed despite the short
        // rowlock timeout because batchMutate keeps retrying row b.
        Mutation[] mutations = new Mutation[] {
            new Put(a).addImmutable(fam1, null, null),
            new Put(b).addImmutable(fam1, null, null) };
        OperationStatus[] status = region.batchMutate(mutations);
        assertEquals(status[0].getOperationStatusCode(), OperationStatusCode.SUCCESS);
        assertEquals(status[1].getOperationStatusCode(), OperationStatusCode.SUCCESS);
        return null;
      }
    });
    f1.get();
    f2.get();
  } finally {
    // FIX: the executor was never shut down (worker-thread leak) and the
    // rowlock timeout was not restored when an assertion failed.
    exec.shutdown();
    CONF.setInt("hbase.rowlock.wait.duration", prevLockTimeout);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Verifies HDFS block-locality accounting for a region. With dfs.replication=2
 * on a 3-node mini cluster and a single flushed store file, the host with the
 * highest weight must account for all unique blocks, and recomputing the
 * distribution from table/region metadata must match the live region's view.
 */
@Test public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu=new HBaseTestingUtility();
  // Two replicas across three datanodes: the data is small enough that the
  // top host should carry every unique block.
  htu.getConfiguration().setInt("dfs.replication",2);
  MiniHBaseCluster cluster=null;
  String dataNodeHosts[]=new String[]{"host1","host2","host3"};
  int regionServersCount=3;
  try {
    cluster=htu.startMiniCluster(1,regionServersCount,dataNodeHosts);
    byte[][] families={fam1,fam2};
    Table ht=htu.createTable(TableName.valueOf(this.getName()),families);
    byte row[]=Bytes.toBytes("row1");
    byte col[]=Bytes.toBytes("col1");
    Put put=new Put(row);
    put.addColumn(fam1,col,(long)1,Bytes.toBytes("test1"));
    put.addColumn(fam2,col,(long)1,Bytes.toBytes("test2"));
    ht.put(put);
    HRegion firstRegion=htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName())).get(0);
    // Flush so the data lands in HDFS and has a block distribution at all.
    firstRegion.flush(true);
    HDFSBlocksDistribution blocksDistribution1=firstRegion.getHDFSBlocksDistribution();
    // Total weight of distinct blocks, ignoring replication.
    long uniqueBlocksWeight1=blocksDistribution1.getUniqueBlocksTotalWeight();
    // Build a host=weight summary purely for the assertion failure message.
    StringBuilder sb=new StringBuilder();
    for ( String host : blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }
    String topHost=blocksDistribution1.getTopHosts().get(0);
    long topHostWeight=blocksDistribution1.getWeight(topHost);
    String msg="uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight="+ topHostWeight+ ", topHost="+ topHost+ "; "+ sb.toString();
    LOG.info(msg);
    // The best-placed host must hold all unique blocks' weight.
    assertTrue(msg,uniqueBlocksWeight1 == topHostWeight);
    // Recomputing from descriptors (the path used for locality-aware
    // assignment) must agree with the live region's measurement.
    HDFSBlocksDistribution blocksDistribution2=HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(),firstRegion.getTableDesc(),firstRegion.getRegionInfo());
    long uniqueBlocksWeight2=blocksDistribution2.getUniqueBlocksTotalWeight();
    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);
    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises checkAndMutate with every non-EQUAL CompareOp (LESS,
 * LESS_OR_EQUAL, GREATER, GREATER_OR_EQUAL) against the currently stored
 * value. When a check passes the attached Put is applied, so the stored value
 * changes as the sequence progresses — the assertions below are
 * order-dependent.
 * NOTE(review): the expected booleans assume the CompareOp is evaluated on
 * comparator-vs-stored-value as in the upstream HRegion.checkAndMutate
 * contract — confirm against that javadoc before reordering anything.
 */
@Test public void testCheckAndMutate_WithNonEqualCompareOp() throws IOException {
  byte[] row1=Bytes.toBytes("row1");
  byte[] fam1=Bytes.toBytes("fam1");
  byte[] qf1=Bytes.toBytes("qualifier");
  byte[] val1=Bytes.toBytes("value1");
  byte[] val2=Bytes.toBytes("value2");
  byte[] val3=Bytes.toBytes("value3");
  byte[] val4=Bytes.toBytes("value4");
  String method=this.getName();
  this.region=initHRegion(tableName,method,CONF,fam1);
  try {
    // Seed the cell with val3.
    Put put=new Put(row1);
    put.addColumn(fam1,qf1,val3);
    region.put(put);
    // Stored value is val3: LESS against val3 and val4 must fail.
    boolean res=region.checkAndMutate(row1,fam1,qf1,CompareOp.LESS,new BinaryComparator(val3),put,true);
    assertEquals(false,res);
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.LESS,new BinaryComparator(val4),put,true);
    assertEquals(false,res);
    // LESS against val2 passes; the Put stores val2.
    put=new Put(row1);
    put.addColumn(fam1,qf1,val2);
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.LESS,new BinaryComparator(val2),put,true);
    assertEquals(true,res);
    // Stored value now val2: LESS_OR_EQUAL val3 fails, val2 passes (re-puts val2).
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.LESS_OR_EQUAL,new BinaryComparator(val3),put,true);
    assertEquals(false,res);
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.LESS_OR_EQUAL,new BinaryComparator(val2),put,true);
    assertEquals(true,res);
    // LESS_OR_EQUAL val1 passes; the Put stores val3.
    put=new Put(row1);
    put.addColumn(fam1,qf1,val3);
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.LESS_OR_EQUAL,new BinaryComparator(val1),put,true);
    assertEquals(true,res);
    // Stored value now val3: GREATER val3 and val2 must fail.
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.GREATER,new BinaryComparator(val3),put,true);
    assertEquals(false,res);
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.GREATER,new BinaryComparator(val2),put,true);
    assertEquals(false,res);
    // GREATER val4 passes; the Put stores val2.
    put=new Put(row1);
    put.addColumn(fam1,qf1,val2);
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.GREATER,new BinaryComparator(val4),put,true);
    assertEquals(true,res);
    // Stored value now val2: GREATER_OR_EQUAL val1 fails, val2 and val3 pass.
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.GREATER_OR_EQUAL,new BinaryComparator(val1),put,true);
    assertEquals(false,res);
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.GREATER_OR_EQUAL,new BinaryComparator(val2),put,true);
    assertEquals(true,res);
    res=region.checkAndMutate(row1,fam1,qf1,CompareOp.GREATER_OR_EQUAL,new BinaryComparator(val3),put,true);
    assertEquals(true,res);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region=null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A forced split must be abandoned — and the force-split flag cleared — when
 * any store still has reference files. A mock HStore reporting
 * hasReferences()=true is injected into region.stores to simulate that state;
 * once the mock is removed the same split request must succeed and both
 * daughters must hold their half of the data.
 */
@Test public void testClearForceSplit() throws IOException {
  byte[] qualifier=Bytes.toBytes("qualifier");
  Configuration hc=initSplit();
  int numRows=10;
  byte[][] families={fam1,fam3};
  String method=this.getName();
  this.region=initHRegion(tableName,method,hc,families);
  // Rows 100..109 before the split point, 110..119 after it.
  int startRow=100;
  putData(startRow,numRows,qualifier,families);
  int splitRow=startRow + numRows;
  byte[] splitRowBytes=Bytes.toBytes("" + splitRow);
  putData(splitRow,numRows,qualifier,families);
  region.flush(true);
  HRegion[] regions=null;
  try {
    region.forceSplit(splitRowBytes);
    assertTrue(region.shouldForceSplit());
    // With the split forced, checkSplit must hand back the requested row.
    assertTrue(Bytes.equals(splitRowBytes,region.checkSplit()));
    // Inject a mock store that claims to still have reference files.
    HStore storeMock=Mockito.mock(HStore.class);
    when(storeMock.hasReferences()).thenReturn(true);
    when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf"));
    when(storeMock.close()).thenReturn(ImmutableList.of());
    when(storeMock.getColumnFamilyName()).thenReturn("cf");
    region.stores.put(Bytes.toBytes(storeMock.getColumnFamilyName()),storeMock);
    assertTrue(region.hasReferences());
    // Split must be refused (null) and the force flag must be cleared.
    regions=splitRegion(region,splitRowBytes);
    assertNull(regions);
    assertFalse(region.shouldForceSplit());
    // Remove the mock; with no references the split must now go through.
    region.stores.remove(Bytes.toBytes(storeMock.getColumnFamilyName()));
    assertFalse(region.hasReferences());
    regions=splitRegion(region,splitRowBytes);
    for (int i=0; i < regions.length; i++) {
      regions[i]=HRegion.openHRegion(regions[i],null);
    }
    assertEquals(2,regions.length);
    // Each daughter holds exactly its side of the split point.
    verifyData(regions[0],startRow,numRows,qualifier,families);
    verifyData(regions[1],splitRow,numRows,qualifier,families);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region=null;
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * region.close() flushes the memstore; if that flush fails at the filesystem
 * level the close must surface a DroppedSnapshotException rather than silently
 * losing the snapshot. A FaultyFileSystem is installed via "fs.file.impl" and
 * flush retries are reduced to 1 so the failure is immediate.
 */
@Test(timeout=60000) public void testCloseWithFailingFlush() throws Exception {
  final Configuration conf=HBaseConfiguration.create(CONF);
  final String callingMethod=name.getMethodName();
  final WAL wal=createWALCompatibleWithFaultyFileSystem(callingMethod,conf,tableName);
  // One attempt only — we want the flush to fail fast, not retry for minutes.
  conf.setInt("hbase.hstore.flush.retries.number",1);
  final User user=User.createUserForTesting(conf,this.name.getMethodName(),new String[]{"foo"});
  conf.setClass("fs.file.impl",FaultyFileSystem.class,FileSystem.class);
  // Run as a dedicated test user so the FS cache entry can be dropped below.
  user.runAs(new PrivilegedExceptionAction(){
    @Override public Object run() throws Exception {
      FileSystem fs=FileSystem.get(conf);
      Assert.assertEquals(FaultyFileSystem.class,fs.getClass());
      FaultyFileSystem ffs=(FaultyFileSystem)fs;
      HRegion region=null;
      try {
        region=initHRegion(tableName,null,null,callingMethod,conf,false,Durability.SYNC_WAL,wal,COLUMN_FAMILY_BYTES);
        long size=region.getMemstoreSize();
        Assert.assertEquals(0,size);
        Put p1=new Put(row);
        p1.add(new KeyValue(row,COLUMN_FAMILY_BYTES,qual1,1,(byte[])null));
        region.put(p1);
        // Prepare (but do not commit) a flush so a snapshot is outstanding
        // when close() runs its own flush.
        Store store=region.getStore(COLUMN_FAMILY_BYTES);
        StoreFlushContext storeFlushCtx=store.createFlushContext(12345);
        storeFlushCtx.prepare();
        Put p2=new Put(row);
        p2.add(new KeyValue(row,COLUMN_FAMILY_BYTES,qual2,2,(byte[])null));
        p2.add(new KeyValue(row,COLUMN_FAMILY_BYTES,qual3,3,(byte[])null));
        region.put(p2);
        // close() triggers a flush that hits the faulty filesystem.
        region.close();
        fail();
      } catch ( DroppedSnapshotException dse) {
        // Expected: the failed flush dropped the prepared snapshot.
        LOG.info("Expected DroppedSnapshotException");
      } finally {
        // Disable the fault so cleanup itself can flush successfully.
        ffs.fault.set(false);
        HBaseTestingUtility.closeRegionAndWAL(region);
      }
      return null;
    }
  } );
  FileSystem.closeAllForUGI(user.getUGI());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Checks that a put is readable both before and after a memstore flush, and
 * that a second put for the same row/qualifier/timestamp replaces the earlier
 * value — again both before and after flushing.
 * @throws Exception on any region failure
 */
@Test
public void testPutWithMemStoreFlush() throws Exception {
  byte[] family = Bytes.toBytes("family"); // FIX: dropped stray empty statement
  byte[] qualifier = Bytes.toBytes("qualifier");
  byte[] row = Bytes.toBytes("putRow");
  byte[] value = null;
  this.region = initHRegion(tableName, method, CONF, family);
  Put put = null;
  Get get = null;
  // FIX: was a raw List; CellUtil.cloneValue(kvs.get(0)) requires Cell elements.
  List<Cell> kvs = null;
  Result res = null;
  // Write value0 at a fixed timestamp and verify from the memstore.
  put = new Put(row);
  value = Bytes.toBytes("value0");
  put.addColumn(family, qualifier, 1234567L, value); // FIX: 'l' literal -> 'L'
  region.put(put);
  get = new Get(row);
  get.addColumn(family, qualifier);
  get.setMaxVersions();
  res = this.region.get(get);
  kvs = res.getColumnCells(family, qualifier);
  assertEquals(1, kvs.size());
  assertArrayEquals(Bytes.toBytes("value0"), CellUtil.cloneValue(kvs.get(0)));
  // After the flush the same single version must come from the store file.
  region.flush(true);
  get = new Get(row);
  get.addColumn(family, qualifier);
  get.setMaxVersions();
  res = this.region.get(get);
  kvs = res.getColumnCells(family, qualifier);
  assertEquals(1, kvs.size());
  assertArrayEquals(Bytes.toBytes("value0"), CellUtil.cloneValue(kvs.get(0)));
  // Same row, same timestamp, new value: the memstore entry must win over the
  // flushed value0 and still yield exactly one version.
  put = new Put(row);
  value = Bytes.toBytes("value1");
  put.addColumn(family, qualifier, 1234567L, value);
  region.put(put);
  get = new Get(row);
  get.addColumn(family, qualifier);
  get.setMaxVersions();
  res = this.region.get(get);
  kvs = res.getColumnCells(family, qualifier);
  assertEquals(1, kvs.size());
  assertArrayEquals(Bytes.toBytes("value1"), CellUtil.cloneValue(kvs.get(0)));
  // And the replacement must survive a second flush.
  region.flush(true);
  get = new Get(row);
  get.addColumn(family, qualifier);
  get.setMaxVersions();
  res = this.region.get(get);
  kvs = res.getColumnCells(family, qualifier);
  assertEquals(1, kvs.size());
  assertArrayEquals(Bytes.toBytes("value1"), CellUtil.cloneValue(kvs.get(0)));
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Recovered-edits replay must skip edits already covered by the stores' max
 * flushed sequence id: edit files with seq ids 1000..1050 (step 10) are
 * written, every store reports 1029 (recoverSeqId - 1) as flushed, so only
 * edits with seq id >= 1030 may reappear after replay.
 */
@Test
public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
  String method = "testSkipRecoveredEditsReplaySomeIgnored";
  TableName tableName = TableName.valueOf(method);
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  final WALFactory wals = new WALFactory(CONF, null, method);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
    long maxSeqId = 1050;
    long minSeqId = 1000;
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      // File name encodes the sequence id; one single-edit file per id.
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      // NOTE(review): the stream returned by create() is never closed — it only
      // pre-creates the file before the writer reopens it; confirm intent.
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
      long time = System.nanoTime();
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put,
          Bytes.toBytes(i)));
      writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
          HConstants.DEFAULT_CLUSTER_ID), edit));
      writer.close();
    }
    long recoverSeqId = 1030;
    MonitoredTask status = TaskMonitor.get().createStatus(method);
    // FIX: was a raw TreeMap/Map; type it so put() is checked.
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
    for (Store store : region.getStores()) {
      maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
    }
    long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
    assertEquals(maxSeqId, seqId);
    region.getMVCC().advanceTo(seqId);
    Get get = new Get(row);
    Result result = region.get(get);
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      // FIX: was a raw List; cloneValue(kvs.get(0)) requires Cell elements.
      List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
      if (i < recoverSeqId) {
        // Covered by the stores' flushed seq id: must have been skipped.
        assertEquals(0, kvs.size());
      } else {
        assertEquals(1, kvs.size());
        assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
      }
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * getScanner() must only build store scanners for the families a Scan names;
 * with no families named it covers (nearly) all of them.
 */
@Test
public void testGetScanner_WithNoFamilies() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] fam2 = Bytes.toBytes("fam2");
  byte[] fam3 = Bytes.toBytes("fam3");
  byte[] fam4 = Bytes.toBytes("fam4");
  byte[][] families = { fam1, fam2, fam3, fam4 };
  this.region = initHRegion(tableName, this.getName(), CONF, families);
  try {
    // One null-valued cell per family so every store has data.
    Put put = new Put(row1);
    put.addColumn(fam1, null, null);
    put.addColumn(fam2, null, null);
    put.addColumn(fam3, null, null);
    put.addColumn(fam4, null, null);
    region.put(put);
    // NOTE(review): expected heap sizes (1, then families.length - 1) mirror
    // the original expectations for lazy store-scanner setup — confirm the
    // rationale before changing them.
    Scan restricted = new Scan();
    restricted.addFamily(fam2);
    restricted.addFamily(fam4);
    HRegion.RegionScannerImpl scanner = (RegionScannerImpl) region.getScanner(restricted);
    assertEquals(1, scanner.storeHeap.getHeap().size());
    Scan unrestricted = new Scan();
    scanner = (RegionScannerImpl) region.getScanner(unrestricted);
    assertEquals(families.length - 1, scanner.storeHeap.getHeap().size());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * The per-region write request counter must start at zero, stay untouched by
 * merely constructing a Put, advance by one per put, and count deletes too.
 */
@Test
public void testWriteRequestsCounter() throws IOException {
  byte[] fam = Bytes.toBytes("info");
  byte[][] families = { fam };
  this.region = initHRegion(tableName, method, CONF, families);
  Assert.assertEquals(0L, region.getWriteRequestsCount());
  Put put = new Put(row);
  put.addColumn(fam, fam, fam);
  // Building a Put alone must not bump the counter.
  Assert.assertEquals(0L, region.getWriteRequestsCount());
  // Each executed put increments the counter by exactly one.
  for (long expected = 1L; expected <= 3L; expected++) {
    region.put(put);
    Assert.assertEquals(expected, region.getWriteRequestsCount());
  }
  // Deletes are write requests as well.
  region.delete(new Delete(row));
  Assert.assertEquals(4L, region.getWriteRequestsCount());
  HBaseTestingUtility.closeRegionAndWAL(this.region);
  this.region = null;
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Every flush must write a START_FLUSH and a matching COMMIT_FLUSH marker to
 * the WAL with consistent sequence numbers, region/store metadata and flush
 * output files. The captured markers are then written into a recovered-edits
 * file and the region is reopened to verify replaying flush markers does not
 * break recovery (the data must still be readable).
 */
@Test
public void testFlushMarkers() throws Exception {
  String method = name.getMethodName();
  TableName tableName = TableName.valueOf(method);
  byte[] family = Bytes.toBytes("family");
  // A dedicated WAL rooted in the test dir so its file can be read back below.
  Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
  final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
  FSUtils.setRootDir(walConf, logDir);
  final WALFactory wals = new WALFactory(walConf, null, method);
  final WAL wal = wals.getWAL(tableName.getName(), tableName.getNamespace());
  this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
      method, CONF, false, Durability.USE_DEFAULT, wal, family);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    long maxSeqId = 3;
    long minSeqId = 0;
    // Three put+flush cycles -> three store files and three marker pairs.
    for (long i = minSeqId; i < maxSeqId; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
      region.put(put);
      region.flush(true);
    }
    assertEquals(3, region.getStore(family).getStorefilesCount());
    // FIX: was a raw ArrayList; typed so contains()/iteration are checked.
    List<String> storeFiles = new ArrayList<String>(3);
    for (StoreFile sf : region.getStore(family).getStorefiles()) {
      storeFiles.add(sf.getPath().getName());
    }
    // Shut the WAL down so its current file is complete and readable.
    wal.shutdown();
    WAL.Reader reader = WALFactory.createReader(fs,
        DefaultWALProvider.getCurrentFileName(wal), TEST_UTIL.getConfiguration());
    try {
      // FIX: was a raw List; 'for (WAL.Entry entry : ...)' below does not
      // compile when the element type is Object.
      List<WAL.Entry> flushDescriptors = new ArrayList<WAL.Entry>();
      long lastFlushSeqId = -1;
      while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
          break;
        }
        Cell cell = entry.getEdit().getCells().get(0);
        if (WALEdit.isMetaEditFamily(cell)) {
          FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
          assertNotNull(flushDesc);
          assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray());
          if (flushDesc.getAction() == FlushAction.START_FLUSH) {
            // Each new flush starts at a higher sequence id than the last.
            assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId);
          } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
            // The commit must carry the same seq id as its start marker.
            assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId);
          }
          lastFlushSeqId = flushDesc.getFlushSequenceNumber();
          assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray());
          assertEquals(1, flushDesc.getStoreFlushesCount()); // one store flushed
          StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0);
          assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray());
          assertEquals("family", storeFlushDesc.getStoreHomeDir());
          if (flushDesc.getAction() == FlushAction.START_FLUSH) {
            assertEquals(0, storeFlushDesc.getFlushOutputCount());
          } else {
            // Commit markers name exactly the store file the flush produced.
            assertEquals(1, storeFlushDesc.getFlushOutputCount());
            assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0)));
          }
          flushDescriptors.add(entry);
        }
      }
      // Three flushes, two markers each.
      assertEquals(3 * 2, flushDescriptors.size());
      // Re-write the markers as recovered edits to exercise marker replay.
      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
      for (WAL.Entry entry : flushDescriptors) {
        writer.append(entry);
      }
      writer.close();
    } finally {
      if (null != reader) {
        try {
          reader.close();
        } catch (IOException exception) {
          LOG.warn("Problem closing wal: " + exception.getMessage());
          LOG.debug("exception details", exception);
        }
      }
    }
    // Reopen (replays the recovered edits) and verify the data survived.
    region.close();
    region = HRegion.openHRegion(region, null);
    for (long i = minSeqId; i < maxSeqId; i++) {
      Get get = new Get(Bytes.toBytes(i));
      Result result = region.get(get);
      byte[] value = result.getValue(family, Bytes.toBytes(i));
      assertArrayEquals(Bytes.toBytes(i), value);
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * With a ROWCOL bloom filter and unlimited versions, a Get with
 * setMaxVersions() must return all four stored versions, newest first.
 */
@Test
public void testAllColumnsWithBloomFilter() throws IOException {
  byte[] TABLE = Bytes.toBytes("testAllColumnsWithBloomFilter");
  byte[] FAMILY = Bytes.toBytes("family");
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY)
      .setMaxVersions(Integer.MAX_VALUE)
      .setBloomFilterType(BloomType.ROWCOL);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  this.region = TEST_UTIL.createLocalHRegion(info, htd);
  try {
    byte row[] = Bytes.toBytes("row:" + 0);
    byte column[] = Bytes.toBytes("column:" + 0);
    // Four versions (timestamps 1..4) of the same cell, written in one Put.
    Put put = new Put(row);
    put.setDurability(Durability.SKIP_WAL);
    for (long version = 1; version <= 4; version++) {
      put.addColumn(FAMILY, column, version, Bytes.toBytes("value-version-" + version));
    }
    region.put(put);
    region.flush(true);
    Get get = new Get(row);
    get.setMaxVersions();
    Cell[] cells = region.get(get).rawCells();
    assertEquals(4, cells.length);
    // Results arrive newest-to-oldest: versions 4, 3, 2, 1.
    for (int i = 0; i < cells.length; i++) {
      checkOneCell(cells[i], FAMILY, 0, 0, 4 - i);
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * Splits a region, opens and compacts the two daughters, merges them back via
 * HRegion.mergeAdjacent, then removes the obsolete region directories.
 */
@Test
public void testMerge() throws IOException {
  byte[][] families = { fam1, fam2, fam3 };
  Configuration hc = initSplit();
  String method = this.getName();
  this.region = initHRegion(tableName, method, hc, families);
  try {
    LOG.info("" + HBaseTestCase.addContent(region, fam3));
    region.flush(true);
    region.compactStores();
    byte[] splitRow = region.checkSplit();
    assertNotNull(splitRow);
    LOG.info("SplitRow: " + Bytes.toString(splitRow));
    HRegion[] subregions = splitRegion(region, splitRow);
    try {
      for (int i = 0; i < subregions.length; i++) {
        // BUGFIX: the opened region returned by openHRegion was previously
        // discarded, so compactStores() ran against the unopened daughter.
        subregions[i] = HRegion.openHRegion(subregions[i], null);
        subregions[i].compactStores();
      }
      Path oldRegionPath = region.getRegionFileSystem().getRegionDir();
      Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir();
      Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir();
      long startTime = System.currentTimeMillis();
      region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
      LOG.info("Merge regions elapsed time: "
          + ((System.currentTimeMillis() - startTime) / 1000.0));
      // The merged region has its own dir; drop the three stale ones.
      FILESYSTEM.delete(oldRegion1, true);
      FILESYSTEM.delete(oldRegion2, true);
      FILESYSTEM.delete(oldRegionPath, true);
      LOG.info("splitAndMerge completed.");
    } finally {
      for (int i = 0; i < subregions.length; i++) {
        try {
          HBaseTestingUtility.closeRegionAndWAL(subregions[i]);
        } catch (IOException e) {
          // Best-effort cleanup; daughters may already be closed by the merge.
        }
      }
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Explicit-column scan of memstore-only data: six cells (two qualifiers, three
 * timestamps each) are written, but the scan asks only for qf1 with
 * MAX_VERSIONS — so exactly the two newest qf1 versions are expected, in
 * newest-first order.
 */
@Test
public void testScanner_ExplicitColumns_FromMemStore_EnforceVersions() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] qf1 = Bytes.toBytes("qualifier1");
  byte[] qf2 = Bytes.toBytes("qualifier2");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[][] families = { fam1 };
  long ts1 = System.currentTimeMillis();
  long ts2 = ts1 + 1;
  long ts3 = ts1 + 2;
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, families);
  try {
    Put put = null;
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);
    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);
    put = new Put(row1);
    put.add(kv13);
    put.add(kv12);
    put.add(kv11);
    put.add(kv23);
    put.add(kv22);
    put.add(kv21);
    region.put(put);
    // FIX: raw Lists replaced with List<Cell> — scanner.next(List<Cell>) and
    // the element comparisons below are otherwise unchecked.
    List<Cell> expected = new ArrayList<Cell>();
    expected.add(kv13);
    expected.add(kv12);
    Scan scan = new Scan(row1);
    scan.addColumn(fam1, qf1); // qf2 cells must be filtered out entirely
    scan.setMaxVersions(MAX_VERSIONS);
    List<Cell> actual = new ArrayList<Cell>();
    InternalScanner scanner = region.getScanner(scan);
    boolean hasNext = scanner.next(actual);
    // Single row: one next() call drains the scanner.
    assertEquals(false, hasNext);
    for (int i = 0; i < expected.size(); i++) {
      assertEquals(expected.get(i), actual.get(i));
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Adding the same column twice to a single Delete must remove both stored
 * versions of that column (written at timestamps 1 and 2), leaving the row
 * empty.
 */
@Test
public void testDelete_multiDeleteColumn() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] qual = Bytes.toBytes("qualifier");
  byte[] value = Bytes.toBytes("value");
  // Two versions of the same cell.
  Put put = new Put(row1);
  put.addColumn(fam1, qual, (long) 1, value);
  put.addColumn(fam1, qual, (long) 2, value);
  this.region = initHRegion(tableName, this.getName(), CONF, fam1);
  try {
    region.put(put);
    // Each addColumn targets the latest remaining version; two of them
    // together must wipe both versions.
    Delete delete = new Delete(row1);
    delete.addColumn(fam1, qual);
    delete.addColumn(fam1, qual);
    region.delete(delete);
    Get get = new Get(row1);
    get.addFamily(fam1);
    Result result = region.get(get);
    assertEquals(0, result.size());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * During distributed log replay a recovering region must NOT write its
 * REGION_OPEN event to the WAL at open time; the marker is appended only when
 * recovery finishes (FinishRegionRecoveringHandler). The captured WALEdit is
 * then checked field-by-field: event type, table/region names, sequence
 * number, server, and one store descriptor per family (fam1 with one store
 * file from the earlier flush, fam2 with none).
 */
@Test
public void testOpenRegionWrittenToWALForLogReplay() throws Exception {
  final ServerName serverName =
      ServerName.valueOf("testOpenRegionWrittenToWALForLogReplay", 100, 42);
  final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWALForLogReplay"));
  htd.addFamily(new HColumnDescriptor(fam1));
  htd.addFamily(new HColumnDescriptor(fam2));
  HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY,
      HConstants.EMPTY_BYTE_ARRAY);
  // Seed fam1 with one flushed store file, then close so reopen has state.
  HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);
  assertNotNull(region);
  region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
  region.flush(true);
  HBaseTestingUtility.closeRegionAndWAL(region);
  // FIX: raw ArgumentCaptor made getValue() return Object, which cannot be
  // assigned to WALEdit below.
  ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
  WAL wal = mockWAL();
  when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
  // Mark the region as recovering so the open-event append is deferred.
  HashMap recoveringRegions = Maps.newHashMap();
  recoveringRegions.put(region.getRegionInfo().getEncodedName(), null);
  when(rss.getRecoveringRegions()).thenReturn(recoveringRegions);
  try {
    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    conf.set(HConstants.REGION_IMPL, HRegionWithSeqId.class.getName());
    region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), conf, rss, null);
    // Still recovering: nothing may have been appended yet.
    verify(wal, times(0)).append((HTableDescriptor) any(), (HRegionInfo) any(),
        (WALKey) any(), editCaptor.capture(), anyBoolean());
    // Finishing recovery triggers exactly one append (the open marker).
    new FinishRegionRecoveringHandler(rss, region.getRegionInfo().getEncodedName(), "/foo")
        .prepare().process();
    verify(wal, times(1)).append((HTableDescriptor) any(), (HRegionInfo) any(),
        (WALKey) any(), editCaptor.capture(), anyBoolean());
    WALEdit edit = editCaptor.getValue();
    assertNotNull(edit);
    assertNotNull(edit.getCells());
    assertEquals(1, edit.getCells().size());
    RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getCells().get(0));
    assertNotNull(desc);
    LOG.info("RegionEventDescriptor from WAL: " + desc);
    assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType());
    assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName()));
    assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(),
        hri.getEncodedNameAsBytes()));
    assertTrue(desc.getLogSequenceNumber() > 0);
    assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer()));
    assertEquals(2, desc.getStoresCount());
    StoreDescriptor store = desc.getStores(0);
    assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1));
    assertEquals(store.getStoreHomeDir(), Bytes.toString(fam1));
    assertEquals(1, store.getStoreFileCount()); // the file flushed above
    assertFalse(store.getStoreFile(0).contains("/")); // bare file name only
    store = desc.getStores(1);
    assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2));
    assertEquals(store.getStoreHomeDir(), Bytes.toString(fam2));
    assertEquals(0, store.getStoreFileCount()); // fam2 never written
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Reverse scan where each of four rows lives in a different column family and
 * a different location (three flushed HFiles plus one memstore entry):
 * starting from row4 the scanner must return row4, row3, row2, row1 in that
 * order, one cell per row. Compaction is effectively disabled so the four
 * stores stay separate.
 */
@Test(timeout = 60000)
public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] row2 = Bytes.toBytes("row2");
  byte[] row3 = Bytes.toBytes("row3");
  byte[] row4 = Bytes.toBytes("row4");
  byte[] cf1 = Bytes.toBytes("CF1");
  byte[] cf2 = Bytes.toBytes("CF2");
  byte[] cf3 = Bytes.toBytes("CF3");
  byte[] cf4 = Bytes.toBytes("CF4");
  byte[][] families = { cf1, cf2, cf3, cf4 };
  byte[] col = Bytes.toBytes("C");
  long ts = 1;
  String method = this.getName();
  // FIX: 'new HBaseConfiguration()' is deprecated; use the factory.
  Configuration conf = HBaseConfiguration.create();
  // Huge threshold keeps compaction from merging the per-flush files.
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  this.region = initHRegion(tableName, method, conf, families);
  try {
    KeyValue kv1 = new KeyValue(row1, cf1, col, ts, KeyValue.Type.Put, null);
    KeyValue kv2 = new KeyValue(row2, cf2, col, ts, KeyValue.Type.Put, null);
    KeyValue kv3 = new KeyValue(row3, cf3, col, ts, KeyValue.Type.Put, null);
    KeyValue kv4 = new KeyValue(row4, cf4, col, ts, KeyValue.Type.Put, null);
    Put put = new Put(row1);
    put.add(kv1);
    region.put(put);
    region.flush(true);
    put = new Put(row2);
    put.add(kv2);
    region.put(put);
    region.flush(true);
    put = new Put(row3);
    put.add(kv3);
    region.put(put);
    region.flush(true);
    put = new Put(row4);
    put.add(kv4);
    region.put(put); // row4 stays in the memstore
    Scan scan = new Scan(row4);
    scan.setReversed(true);
    scan.setBatch(10);
    InternalScanner scanner = region.getScanner(scan);
    // FIX: was a raw List; currRow.get(0).getRowArray() does not compile when
    // the element type is Object.
    List<Cell> currRow = new ArrayList<Cell>();
    boolean hasNext = scanner.next(currRow);
    assertEquals(1, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
        currRow.get(0).getRowLength(), row4, 0, row4.length));
    assertTrue(hasNext);
    currRow.clear();
    hasNext = scanner.next(currRow);
    assertEquals(1, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
        currRow.get(0).getRowLength(), row3, 0, row3.length));
    assertTrue(hasNext);
    currRow.clear();
    hasNext = scanner.next(currRow);
    assertEquals(1, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
        currRow.get(0).getRowLength(), row2, 0, row2.length));
    assertTrue(hasNext);
    currRow.clear();
    hasNext = scanner.next(currRow);
    assertEquals(1, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
        currRow.get(0).getRowLength(), row1, 0, row1.length));
    // row1 is the smallest row; the scanner is exhausted.
    assertFalse(hasNext);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that increment timestamps never move backwards: after an increment
 * while the (manually controlled) clock reads 10, a second increment issued
 * while the clock reads 1 must keep the cell timestamp at 10 while still
 * applying the increment to the value (1 + 1 = 2).
 */
@Test
public void testIncrementTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  try {
    edge.setValue(10);
    Increment inc = new Increment(row);
    inc.setDurability(Durability.SKIP_WAL);
    inc.addColumn(fam1, qual1, 1L);
    region.increment(inc);
    Result result = region.get(new Get(row));
    Cell c = result.getColumnLatestCell(fam1, qual1);
    assertNotNull(c);
    // BUG FIX: assertEquals arguments were (actual, expected); JUnit's
    // contract is (expected, actual) so failure messages read correctly.
    assertEquals(10L, c.getTimestamp());
    // Move the clock backwards; the cell timestamp must not regress.
    edge.setValue(1);
    region.increment(inc);
    result = region.get(new Get(row));
    c = result.getColumnLatestCell(fam1, qual1);
    assertEquals(10L, c.getTimestamp());
    // The value itself still reflects both increments.
    assertEquals(2L, Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength()));
  } finally {
    // BUG FIX: restore the real clock and close the region so this test does
    // not leak a manual clock (or an open region/WAL) into subsequent tests.
    EnvironmentEdgeManager.reset();
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A Get against a freshly created region with no data must return an empty
 * Result rather than null or an error.
 */
@Test
public void testGet_Empty() throws IOException {
  byte[] rowKey = Bytes.toBytes("row");
  byte[] family = Bytes.toBytes("fam");
  this.region = initHRegion(tableName, this.getName(), CONF, family);
  try {
    Get get = new Get(rowKey);
    get.addFamily(family);
    Result result = region.get(get);
    assertTrue(result.isEmpty());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}

InternalCallVerifier BooleanVerifier 
// HBASE-2823 regression test. Sequence: put one cell (ROWCOL bloom), flush,
// whole-row Delete, flush again, then Get the same row/column — the result
// must be empty. The ROWCOL bloom filter must not cause the deleted cell to
// reappear on the post-delete read.
/** * Testcase to cover bug-fix for HBASE-2823 Ensures correct delete when * issuing delete row on columns with bloom filter set to row+col * (BloomType.ROWCOL) */ @Test public void testDeleteRowWithBloomFilter() throws IOException { byte[] familyName=Bytes.toBytes("familyName"); HColumnDescriptor hcd=new HColumnDescriptor(familyName).setMaxVersions(Integer.MAX_VALUE).setBloomFilterType(BloomType.ROWCOL); HTableDescriptor htd=new HTableDescriptor(tableName); htd.addFamily(hcd); HRegionInfo info=new HRegionInfo(htd.getTableName(),null,null,false); this.region=TEST_UTIL.createLocalHRegion(info,htd); try { byte row[]=Bytes.toBytes("row1"); byte col[]=Bytes.toBytes("col1"); Put put=new Put(row); put.addColumn(familyName,col,(long)1,Bytes.toBytes("SomeRandomValue")); region.put(put); region.flush(true); Delete del=new Delete(row); region.delete(del); region.flush(true); Get get=new Get(row); get.addColumn(familyName,col); Cell[] keyValues=region.get(get).rawCells(); assertTrue(keyValues.length == 0); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region=null; } }

Class: org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Exercises retry behavior of the non-idempotent filesystem operations
 * (createDir, rename, deleteDir) through mock filesystems that are presumably
 * wired to fail on first attempts (MockFileSystemForCreate / MockFileSystem
 * are defined elsewhere in this class — TODO confirm their failure behavior).
 * Each operation must still report success once retries are exhausted.
 */
@Test public void testNonIdempotentOpsWithRetries() throws IOException { Path rootDir=TEST_UTIL.getDataTestDirOnTestFS("testOnDiskRegionCreation"); FileSystem fs=TEST_UTIL.getTestFileSystem(); Configuration conf=TEST_UTIL.getConfiguration(); HRegionInfo hri=new HRegionInfo(TableName.valueOf("TestTable")); HRegionFileSystem regionFs=HRegionFileSystem.createRegionOnFileSystem(conf,fs,rootDir,hri); assertTrue(fs.exists(regionFs.getRegionDir())); regionFs=new HRegionFileSystem(conf,new MockFileSystemForCreate(),null,null); boolean result=regionFs.createDir(new Path("/foo/bar")); assertTrue("Couldn't create the directory",result); regionFs=new HRegionFileSystem(conf,new MockFileSystem(),null,null); result=regionFs.rename(new Path("/foo/bar"),new Path("/foo/bar2")); assertTrue("Couldn't rename the directory",result); regionFs=new HRegionFileSystem(conf,new MockFileSystem(),null,null); result=regionFs.deleteDir(new Path("/foo/bar")); assertTrue("Couldn't delete the directory",result); fs.delete(rootDir,true); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creates a file under the region's temp area and commits it into the store,
 * verifying the store-file listing at each step and that the temp path is gone
 * after the commit.
 *
 * NOTE(review): the store-file count is asserted to be 0 even AFTER
 * commitStoreFile — presumably because the committed file is zero-length
 * (created via fs.createNewFile) and getStoreFiles filters out files that fail
 * store-file validation. TODO confirm against HRegionFileSystem.getStoreFiles;
 * if it does not validate, the final count assertion should be 1.
 */
@Test public void testTempAndCommit() throws IOException { Path rootDir=TEST_UTIL.getDataTestDirOnTestFS("testTempAndCommit"); FileSystem fs=TEST_UTIL.getTestFileSystem(); Configuration conf=TEST_UTIL.getConfiguration(); String familyName="cf"; HRegionInfo hri=new HRegionInfo(TableName.valueOf("TestTable")); HRegionFileSystem regionFs=HRegionFileSystem.createRegionOnFileSystem(conf,fs,rootDir,hri); Collection storeFiles=regionFs.getStoreFiles(familyName); assertEquals(0,storeFiles != null ? storeFiles.size() : 0); Path buildPath=regionFs.createTempName(); fs.createNewFile(buildPath); storeFiles=regionFs.getStoreFiles(familyName); assertEquals(0,storeFiles != null ? storeFiles.size() : 0); Path dstPath=regionFs.commitStoreFile(familyName,buildPath); storeFiles=regionFs.getStoreFiles(familyName); assertEquals(0,storeFiles != null ? storeFiles.size() : 0); assertFalse(fs.exists(buildPath)); fs.delete(rootDir,true); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a region through its on-disk lifecycle: create the region
 * directory, verify the persisted region info matches, re-open the region from
 * the filesystem, then delete it and confirm the directory is gone.
 */
@Test
public void testOnDiskRegionCreation() throws IOException {
  Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testOnDiskRegionCreation");
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Configuration conf = TEST_UTIL.getConfiguration();
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable"));
  Path tableDir = FSUtils.getTableDir(rootDir, hri.getTable());
  // Create a new region on disk.
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
  Path regionDir = regionFs.getRegionDir();
  assertTrue("The region folder should be created", fs.exists(regionDir));
  // The region info written at creation must deserialize back to the original.
  HRegionInfo hriVerify = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
  assertEquals(hri, hriVerify);
  // Re-opening the existing region must resolve to the same directory.
  regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, hri, false);
  assertEquals(regionDir, regionFs.getRegionDir());
  // Deleting the region removes its folder.
  HRegionFileSystem.deleteRegionFromFileSystem(conf, fs, tableDir, hri);
  assertFalse("The region folder should be removed", fs.exists(regionDir));
  fs.delete(rootDir, true);
}

Class: org.apache.hadoop.hbase.regionserver.TestHRegionInfo

InternalCallVerifier EqualityVerifier 
/**
 * Verifies the "hbase.display.keys" switch: when false, start/end keys and the
 * descriptive region-state name must be redacted (HIDDEN_START_KEY /
 * HIDDEN_END_KEY substituted); when true, the real keys and descriptive name
 * are returned unchanged.
 */
@Test public void testRegionDetailsForDisplay() throws IOException { byte[] startKey=new byte[]{0x01,0x01,0x02,0x03}; byte[] endKey=new byte[]{0x01,0x01,0x02,0x04}; Configuration conf=new Configuration(); conf.setBoolean("hbase.display.keys",false); HRegionInfo h=new HRegionInfo(TableName.valueOf("foo"),startKey,endKey); checkEquality(h,conf); h=new HRegionInfo(TableName.valueOf("foo"),startKey,endKey,false,System.currentTimeMillis(),1); checkEquality(h,conf); Assert.assertArrayEquals(HRegionInfo.HIDDEN_END_KEY,HRegionInfo.getEndKeyForDisplay(h,conf)); Assert.assertArrayEquals(HRegionInfo.HIDDEN_START_KEY,HRegionInfo.getStartKeyForDisplay(h,conf)); RegionState state=new RegionState(h,RegionState.State.OPEN); String descriptiveNameForDisplay=HRegionInfo.getDescriptiveNameFromRegionStateForDisplay(state,conf); checkDescriptiveNameEquality(descriptiveNameForDisplay,state.toDescriptiveString(),startKey); conf.setBoolean("hbase.display.keys",true); Assert.assertArrayEquals(endKey,HRegionInfo.getEndKeyForDisplay(h,conf)); Assert.assertArrayEquals(startKey,HRegionInfo.getStartKeyForDisplay(h,conf)); Assert.assertEquals(state.toDescriptiveString(),HRegionInfo.getDescriptiveNameFromRegionStateForDisplay(state,conf)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips an HRegionInfo through its protobuf form (convert there and
 * back) and checks equality. Also builds a protobuf RegionInfo WITHOUT a
 * replica id and verifies conversion defaults the replica id to 0.
 */
@Test public void testConvert(){ TableName tableName=TableName.valueOf("ns1:table1"); byte[] startKey=Bytes.toBytes("startKey"); byte[] endKey=Bytes.toBytes("endKey"); boolean split=false; long regionId=System.currentTimeMillis(); int replicaId=42; HRegionInfo hri=new HRegionInfo(tableName,startKey,endKey,split,regionId,replicaId); HRegionInfo convertedHri=HRegionInfo.convert(HRegionInfo.convert(hri)); assertEquals(hri,convertedHri); RegionInfo info=RegionInfo.newBuilder().setTableName(HBaseProtos.TableName.newBuilder().setQualifier(ByteString.copyFrom(tableName.getQualifier())).setNamespace(ByteString.copyFrom(tableName.getNamespace())).build()).setStartKey(ByteString.copyFrom(startKey)).setEndKey(ByteString.copyFrom(endKey)).setSplit(split).setRegionId(regionId).build(); convertedHri=HRegionInfo.convert(info); HRegionInfo expectedHri=new HRegionInfo(tableName,startKey,endKey,split,regionId,0); assertEquals(expectedHri,convertedHri); }

InternalCallVerifier BooleanVerifier 
/**
 * Two regions that differ only in region id must order by that id: the lower
 * id compares less than the higher one, and comparison is reflexive.
 */
@Test
public void testComparator() {
  TableName table = TableName.valueOf("comparatorTablename");
  byte[] emptyKey = new byte[0];
  HRegionInfo oldRegion = new HRegionInfo(table, emptyKey, emptyKey, false, 0L);
  HRegionInfo newRegion = new HRegionInfo(table, emptyKey, emptyKey, false, 1L);
  // Strict ordering in both directions.
  assertTrue(oldRegion.compareTo(newRegion) < 0);
  assertTrue(newRegion.compareTo(oldRegion) > 0);
  // Each instance compares equal to itself.
  assertTrue(oldRegion.compareTo(oldRegion) == 0);
  assertTrue(newRegion.compareTo(newRegion) == 0);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the .regioninfo file lifecycle: creating a region writes it,
 * re-opening the region does NOT rewrite it (modification time unchanged), and
 * its content deserializes back to the original HRegionInfo.
 */
@Test public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException { HBaseTestingUtility htu=new HBaseTestingUtility(); HRegionInfo hri=HRegionInfo.FIRST_META_REGIONINFO; Path basedir=htu.getDataTestDir(); FSTableDescriptors fsTableDescriptors=new FSTableDescriptors(htu.getConfiguration()); HRegion r=HBaseTestingUtility.createRegionAndWAL(hri,basedir,htu.getConfiguration(),fsTableDescriptors.get(TableName.META_TABLE_NAME)); long modtime=getModTime(r); HBaseTestingUtility.closeRegionAndWAL(r); 
// Sleep just past one second so a rewrite would be detectable — file
// modification times presumably have 1s resolution here (TODO confirm for
// the test filesystem in use).
Thread.sleep(1001); r=HRegion.openHRegion(basedir,hri,fsTableDescriptors.get(TableName.META_TABLE_NAME),null,htu.getConfiguration()); long modtime2=getModTime(r); assertEquals(modtime,modtime2); HRegionInfo deserializedHri=HRegionFileSystem.loadRegionInfoFileContent(r.getRegionFileSystem().getFileSystem(),r.getRegionFileSystem().getRegionDir()); assertTrue(hri.equals(deserializedHri)); HBaseTestingUtility.closeRegionAndWAL(r); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Builds region names via createRegionName and parses them back with
 * parseRegionName, checking each field: without a replica id the parse yields
 * exactly 3 fields (table, start key, region id); with a replica id a fourth
 * field holding the formatted replica id is appended.
 */
@Test public void testParseName() throws IOException { TableName tableName=TableName.valueOf("testParseName"); byte[] startKey=Bytes.toBytes("startKey"); long regionId=System.currentTimeMillis(); int replicaId=42; byte[] regionName=HRegionInfo.createRegionName(tableName,startKey,regionId,false); byte[][] fields=HRegionInfo.parseRegionName(regionName); assertArrayEquals(Bytes.toString(fields[0]),tableName.getName(),fields[0]); assertArrayEquals(Bytes.toString(fields[1]),startKey,fields[1]); assertArrayEquals(Bytes.toString(fields[2]),Bytes.toBytes(Long.toString(regionId)),fields[2]); assertEquals(3,fields.length); regionName=HRegionInfo.createRegionName(tableName,startKey,regionId,replicaId,false); fields=HRegionInfo.parseRegionName(regionName); assertArrayEquals(Bytes.toString(fields[0]),tableName.getName(),fields[0]); assertArrayEquals(Bytes.toString(fields[1]),startKey,fields[1]); assertArrayEquals(Bytes.toString(fields[2]),Bytes.toBytes(Long.toString(regionId)),fields[2]); assertArrayEquals(Bytes.toString(fields[3]),Bytes.toBytes(String.format(HRegionInfo.REPLICA_ID_FORMAT,replicaId)),fields[3]); }

InternalCallVerifier BooleanVerifier 
/**
 * A region with an empty end key (the table's last region) must compare
 * greater than a bounded region sharing the same start key.
 */
@Test
public void testLastRegionCompare() {
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf("testtable"));
  TableName table = descriptor.getTableName();
  // Empty end key marks the final region of the table.
  HRegionInfo lastRegion = new HRegionInfo(table, Bytes.toBytes("a"), new byte[0]);
  HRegionInfo boundedRegion = new HRegionInfo(table, Bytes.toBytes("a"), Bytes.toBytes("b"));
  assertTrue(lastRegion.compareTo(boundedRegion) > 0);
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Exercises HRegionInfo.containsRange on a region spanning [a, g): ranges
 * fully inside are contained, ranges touching or past the end key are not,
 * and a reversed (start &gt; end) range must raise IllegalArgumentException.
 */
@Test
public void testContainsRange() {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("testtable"));
  HRegionInfo region = new HRegionInfo(desc.getTableName(), Bytes.toBytes("a"), Bytes.toBytes("g"));
  // Ranges entirely within [a, g) are contained; the end key itself is exclusive.
  assertTrue(region.containsRange(Bytes.toBytes("a"), Bytes.toBytes("a")));
  assertTrue(region.containsRange(Bytes.toBytes("b"), Bytes.toBytes("c")));
  assertTrue(region.containsRange(Bytes.toBytes("a"), Bytes.toBytes("c")));
  assertTrue(region.containsRange(Bytes.toBytes("c"), Bytes.toBytes("c")));
  assertFalse(region.containsRange(Bytes.toBytes("a"), Bytes.toBytes("g")));
  assertFalse(region.containsRange(Bytes.toBytes("g"), Bytes.toBytes("g")));
  assertFalse(region.containsRange(Bytes.toBytes("z"), Bytes.toBytes("z")));
  // A reversed range must be rejected outright.
  boolean threw = false;
  try {
    region.containsRange(Bytes.toBytes("z"), Bytes.toBytes("a"));
  } catch (IllegalArgumentException expected) {
    threw = true;
  }
  assertTrue("Invalid range did not throw IAE", threw);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Serializes the first META region info to its protobuf byte form and parses
 * it back, asserting the round trip preserves equality.
 */
@Test
public void testPb() throws DeserializationException {
  HRegionInfo original = HRegionInfo.FIRST_META_REGIONINFO;
  byte[] serialized = original.toByteArray();
  HRegionInfo roundTripped = HRegionInfo.parseFrom(serialized);
  assertTrue(original.equals(roundTripped));
}

Class: org.apache.hadoop.hbase.regionserver.TestHRegionOnCluster

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * End-to-end correctness check for replaying recovered edits: a region is
 * bounced between two region servers (writing a new row after each move),
 * then both servers are killed in turn. After each recovery the region must
 * still accept writes and every previously written row must verify, proving
 * that recovered edits were replayed without loss or corruption.
 */
@Test(timeout=300000)
public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
  final int NUM_MASTERS = 1;
  final int NUM_RS = 3;
  Admin hbaseAdmin = null;
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  try {
    final TableName TABLENAME =
        TableName.valueOf("testDataCorrectnessReplayingRecoveredEdits");
    final byte[] FAMILY = Bytes.toBytes("family");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    // Create the table with a single family.
    HTableDescriptor desc = new HTableDescriptor(TABLENAME);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    hbaseAdmin = master.getConnection().getAdmin();
    hbaseAdmin.createTable(desc);
    assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));
    LOG.info("Loading r1 to v1 into " + TABLENAME);
    Table table = TEST_UTIL.getConnection().getTable(TABLENAME);
    putDataAndVerify(table, "r1", FAMILY, "v1", 1);
    TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
    // Locate the region holding r1 and pick a different server to move it to.
    HRegionInfo regionInfo;
    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TABLENAME)) {
      regionInfo = locator.getRegionLocation(Bytes.toBytes("r1")).getRegionInfo();
    }
    int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
    HRegionServer originServer = cluster.getRegionServer(originServerNum);
    int targetServerNum = (originServerNum + 1) % NUM_RS;
    HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
    assertFalse(originServer.equals(targetServer));
    TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
    // Move the region to the target server and wait until it actually left.
    LOG.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
    hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
        Bytes.toBytes(targetServer.getServerName().getServerName()));
    do {
      Thread.sleep(1);
    } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);
    LOG.info("Loading r2 to v2 into " + TABLENAME);
    putDataAndVerify(table, "r2", FAMILY, "v2", 2);
    TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
    // Move the region back to the origin server.
    LOG.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
    hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
        Bytes.toBytes(originServer.getServerName().getServerName()));
    do {
      Thread.sleep(1);
    } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);
    LOG.info("Loading r3 to v3 into " + TABLENAME);
    putDataAndVerify(table, "r3", FAMILY, "v3", 3);
    // Kill the target server and wait for its dead-server processing to finish.
    LOG.info("Killing target server " + targetServer.getServerName());
    targetServer.kill();
    cluster.getRegionServerThreads().get(targetServerNum).join();
    while (master.getServerManager().areDeadServersInProgress()) {
      Thread.sleep(5);
    }
    // BUG FIX: this log line previously printed targetServer's name even
    // though it is the origin server being killed here.
    LOG.info("Killing origin server " + originServer.getServerName());
    originServer.kill();
    cluster.getRegionServerThreads().get(originServerNum).join();
    // After both failovers, the table must still accept and verify writes.
    LOG.info("Loading r4 to v4 into " + TABLENAME);
    putDataAndVerify(table, "r4", FAMILY, "v4", 4);
  } finally {
    if (hbaseAdmin != null) hbaseAdmin.close();
    TEST_UTIL.shutdownMiniCluster();
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestHRegionReplayEvents

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies MVCC read-point advancement during replay: replaying a put with a
 * sequence id well above the current read point must advance the region's
 * read point to that id, while a subsequent replay with a LOWER sequence id is
 * still readable (the read point does not move backwards).
 */
@Test public void testSeqIdsFromReplay() throws IOException { String method=name.getMethodName(); byte[] tableName=Bytes.toBytes(method); byte[] family=Bytes.toBytes("family"); HRegion region=initHRegion(tableName,method,family); try { long readPoint=region.getMVCC().getReadPoint(); long origSeqId=readPoint + 100; Put put=new Put(row).addColumn(family,row,row); put.setDurability(Durability.SKIP_WAL); replay(region,put,origSeqId); assertGet(region,family,row); assertEquals(origSeqId,region.getReadPoint(null)); put=new Put(row2).addColumn(family,row2,row2); put.setDurability(Durability.SKIP_WAL); replay(region,put,origSeqId - 50); assertGet(region,family,row2); } finally { region.close(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a secondary replica picks up the primary's store files via
 * refreshStoreFiles(): after initial flushes, after additional flushes, and
 * after the primary compacts (with the compacted-files discharger run so the
 * old files are dropped). Finally, edits replayed into the secondary's
 * memstore must be dropped by a refresh that reveals files already covering
 * them (memstore size returns to 0).
 * Skipped past the compaction phase on Windows (FSUtils.WINDOWS early return).
 */
@Test public void testRefreshStoreFiles() throws IOException { assertEquals(0,primaryRegion.getStoreFileList(families).size()); assertEquals(0,secondaryRegion.getStoreFileList(families).size()); secondaryRegion.refreshStoreFiles(); assertEquals(0,secondaryRegion.getStoreFileList(families).size()); putDataWithFlushes(primaryRegion,100,100,0); int numRows=100; secondaryRegion.refreshStoreFiles(); assertPathListsEqual(primaryRegion.getStoreFileList(families),secondaryRegion.getStoreFileList(families)); assertEquals(families.length,secondaryRegion.getStoreFileList(families).size()); LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion,0,numRows,cq,families); putDataWithFlushes(primaryRegion,100,300,0); numRows=300; secondaryRegion.refreshStoreFiles(); assertPathListsEqual(primaryRegion.getStoreFileList(families),secondaryRegion.getStoreFileList(families)); assertEquals(families.length * 4,secondaryRegion.getStoreFileList(families).size()); LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion,0,numRows,cq,families); if (FSUtils.WINDOWS) { return; } primaryRegion.compactStores(); List regions=new ArrayList(); regions.add(primaryRegion); when(rss.getOnlineRegions()).thenReturn(regions); CompactedHFilesDischarger cleaner=new CompactedHFilesDischarger(100,null,rss,false); cleaner.chore(); secondaryRegion.refreshStoreFiles(); assertPathListsEqual(primaryRegion.getStoreFileList(families),secondaryRegion.getStoreFileList(families)); assertEquals(families.length,secondaryRegion.getStoreFileList(families).size()); LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion,0,numRows,cq,families); LOG.info("-- Replaying edits in secondary"); assertTrue(secondaryRegion.getMemstoreSize() == 0); putDataWithFlushes(primaryRegion,400,400,0); numRows=400; reader=createWALReaderForPrimary(); while (true) { WAL.Entry entry=reader.next(); if (entry == null) { break; } FlushDescriptor 
flush=WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flush != null) { } else { replayEdit(secondaryRegion,entry); } } assertTrue(secondaryRegion.getMemstoreSize() > 0); secondaryRegion.refreshStoreFiles(); assertTrue(secondaryRegion.getMemstoreSize() == 0); LOG.info("-- Verifying edits from primary"); verifyData(primaryRegion,0,numRows,cq,families); LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion,0,numRows,cq,families); }

IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Replays the primary's flush START/COMMIT markers into the secondary and
 * verifies that the COMMIT carries the same flush sequence number as the
 * preceding START, and that the secondary's MVCC read point ends up at that
 * flush sequence id with all data readable.
 */
@Test public void testReplayFlushSeqIds() throws IOException { int start=0; LOG.info("-- Writing some data to primary from " + start + " to "+ (start + 100)); putData(primaryRegion,Durability.SYNC_WAL,start,100,cq,families); LOG.info("-- Flushing primary, creating 3 files for 3 stores"); primaryRegion.flush(true); reader=createWALReaderForPrimary(); long flushSeqId=-1; LOG.info("-- Replaying flush events in secondary"); while (true) { WAL.Entry entry=reader.next(); if (entry == null) { break; } FlushDescriptor flushDesc=WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { if (flushDesc.getAction() == FlushAction.START_FLUSH) { LOG.info("-- Replaying flush start in secondary"); secondaryRegion.replayWALFlushStartMarker(flushDesc); flushSeqId=flushDesc.getFlushSequenceNumber(); } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) { LOG.info("-- Replaying flush commit in secondary"); secondaryRegion.replayWALFlushCommitMarker(flushDesc); assertEquals(flushSeqId,flushDesc.getFlushSequenceNumber()); } } } long readPoint=secondaryRegion.getMVCC().getReadPoint(); assertEquals(flushSeqId,readPoint); verifyData(secondaryRegion,0,100,cq,families); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Tests replaying region open markers from primary region. Checks whether the files are picked up */ @Test public void testReplayBulkLoadEvent() throws IOException { LOG.info("testReplayBulkLoadEvent starts"); putDataWithFlushes(primaryRegion,100,0,100); primaryRegion.close(); primaryRegion=HRegion.openHRegion(rootDir,primaryHri,htd,walPrimary,CONF,rss,null); Random random=new Random(); byte[] randomValues=new byte[20]; random.nextBytes(randomValues); Path testPath=TEST_UTIL.getDataTestDirOnTestFS(); List> familyPaths=new ArrayList>(); int expectedLoadFileCount=0; for ( byte[] family : families) { familyPaths.add(new Pair(family,createHFileForFamilies(testPath,family,randomValues))); expectedLoadFileCount++; } primaryRegion.bulkLoadHFiles(familyPaths,false,null); reader=createWALReaderForPrimary(); LOG.info("-- Replaying edits and region events in secondary"); BulkLoadDescriptor bulkloadEvent=null; while (true) { WAL.Entry entry=reader.next(); if (entry == null) { break; } bulkloadEvent=WALEdit.getBulkLoadDescriptor(entry.getEdit().getCells().get(0)); if (bulkloadEvent != null) { break; } } assertTrue(bulkloadEvent != null); assertEquals(expectedLoadFileCount,bulkloadEvent.getStoresCount()); secondaryRegion.replayWALBulkLoadEventMarker(bulkloadEvent); List storeFileName=new ArrayList(); for ( StoreDescriptor storeDesc : bulkloadEvent.getStoresList()) { storeFileName.addAll(storeDesc.getStoreFileList()); } for ( Store s : secondaryRegion.getStores()) { for ( StoreFile sf : s.getStorefiles()) { storeFileName.remove(sf.getPath().getName()); } } assertTrue("Found some store file isn't loaded:" + storeFileName,storeFileName.isEmpty()); LOG.info("-- Verifying edits from secondary"); for ( byte[] family : families) { assertGet(secondaryRegion,family,randomValues); } }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Scenario: the secondary prepares a flush from a START marker, then receives
// a COMMIT marker whose flush sequence number is 50 HIGHER than the prepared
// one. The commit must still apply: a store file appears, memstore shrinks
// (but stays non-zero — only the prepared snapshot is dropped), the prepare
// result is cleared, and all data remains readable on both replicas.
/** * Tests the case where we prepare a flush with some seqId and we receive a flush commit marker * larger than the previous flush start marker. */ @Test public void testReplayFlushCommitMarkerLargerThanFlushStartMarker() throws IOException { putDataWithFlushes(primaryRegion,100,100,100); int numRows=200; reader=createWALReaderForPrimary(); LOG.info("-- Replaying edits and flush events in secondary"); FlushDescriptor startFlushDesc=null; FlushDescriptor commitFlushDesc=null; int lastReplayed=0; while (true) { WAL.Entry entry=reader.next(); if (entry == null) { break; } FlushDescriptor flushDesc=WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { if (flushDesc.getAction() == FlushAction.START_FLUSH) { if (startFlushDesc == null) { LOG.info("-- Replaying flush start in secondary"); startFlushDesc=flushDesc; PrepareFlushResult result=secondaryRegion.replayWALFlushStartMarker(startFlushDesc); assertNull(result.result); } } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) { commitFlushDesc=FlushDescriptor.newBuilder(flushDesc).setFlushSequenceNumber(flushDesc.getFlushSequenceNumber() + 50).build(); } verifyData(secondaryRegion,0,lastReplayed + 1,cq,families); } else { lastReplayed=replayEdit(secondaryRegion,entry); } } verifyData(secondaryRegion,0,numRows,cq,families); int expectedStoreFileCount=0; for ( Store s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount,s.getStorefilesCount()); } long regionMemstoreSize=secondaryRegion.getMemstoreSize(); LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START"+ startFlushDesc); assertTrue(commitFlushDesc.getFlushSequenceNumber() > startFlushDesc.getFlushSequenceNumber()); LOG.info("-- Replaying flush commit in secondary" + commitFlushDesc); secondaryRegion.replayWALFlushCommitMarker(commitFlushDesc); expectedStoreFileCount++; for ( Store s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount,s.getStorefilesCount()); 
} Store store=secondaryRegion.getStore(Bytes.toBytes("cf1")); long newFlushableSize=store.getFlushableSize(); assertTrue(newFlushableSize > 0); long newRegionMemstoreSize=secondaryRegion.getMemstoreSize(); assertTrue(newRegionMemstoreSize > 0); assertTrue(regionMemstoreSize > newRegionMemstoreSize); assertNull(secondaryRegion.getPrepareFlushResult()); LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion,0,numRows,cq,families); LOG.info("-- Verifying edits from primary."); verifyData(primaryRegion,0,numRows,cq,families); }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Scenario: after a flush START has been prepared on the secondary, replaying
// the SAME start marker again, one with a SMALLER sequence number, and one
// with a LARGER sequence number must all be ignored (replayWALFlushStartMarker
// returns null) — the original prepared flush (its flushOpSeqId) stays in
// place and the memstore is untouched, with all data still readable.
/** * Tests cases where we prepare a flush with some seqId and we receive other flush start markers * equal to, greater or less than the previous flush start marker. */ @Test public void testReplayFlushStartMarkers() throws IOException { putDataWithFlushes(primaryRegion,100,100,100); int numRows=200; reader=createWALReaderForPrimary(); LOG.info("-- Replaying edits and flush events in secondary"); FlushDescriptor startFlushDesc=null; int lastReplayed=0; while (true) { WAL.Entry entry=reader.next(); if (entry == null) { break; } FlushDescriptor flushDesc=WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { Store store=secondaryRegion.getStore(Bytes.toBytes("cf1")); long storeMemstoreSize=store.getMemStoreSize(); long regionMemstoreSize=secondaryRegion.getMemstoreSize(); long storeFlushableSize=store.getFlushableSize(); if (flushDesc.getAction() == FlushAction.START_FLUSH) { startFlushDesc=flushDesc; LOG.info("-- Replaying flush start in secondary"); PrepareFlushResult result=secondaryRegion.replayWALFlushStartMarker(startFlushDesc); assertNull(result.result); assertEquals(result.flushOpSeqId,startFlushDesc.getFlushSequenceNumber()); assertTrue(regionMemstoreSize > 0); assertTrue(storeFlushableSize > 0); long newStoreMemstoreSize=store.getMemStoreSize(); LOG.info("Memstore size reduced by:" + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize)); assertTrue(storeMemstoreSize > newStoreMemstoreSize); verifyData(secondaryRegion,0,lastReplayed + 1,cq,families); } verifyData(secondaryRegion,0,lastReplayed + 1,cq,families); } else { lastReplayed=replayEdit(secondaryRegion,entry); } } verifyData(secondaryRegion,0,numRows,cq,families); LOG.info("-- Replaying same flush start in secondary again"); PrepareFlushResult result=secondaryRegion.replayWALFlushStartMarker(startFlushDesc); assertNull(result); assertNotNull(secondaryRegion.getPrepareFlushResult()); 
assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId,startFlushDesc.getFlushSequenceNumber()); assertTrue(secondaryRegion.getMemstoreSize() > 0); verifyData(secondaryRegion,0,numRows,cq,families); FlushDescriptor startFlushDescSmallerSeqId=clone(startFlushDesc,startFlushDesc.getFlushSequenceNumber() - 50); LOG.info("-- Replaying same flush start in secondary again " + startFlushDescSmallerSeqId); result=secondaryRegion.replayWALFlushStartMarker(startFlushDescSmallerSeqId); assertNull(result); assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId,startFlushDesc.getFlushSequenceNumber()); assertTrue(secondaryRegion.getMemstoreSize() > 0); verifyData(secondaryRegion,0,numRows,cq,families); FlushDescriptor startFlushDescLargerSeqId=clone(startFlushDesc,startFlushDesc.getFlushSequenceNumber() + 50); LOG.info("-- Replaying same flush start in secondary again " + startFlushDescLargerSeqId); result=secondaryRegion.replayWALFlushStartMarker(startFlushDescLargerSeqId); assertNull(result); assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId,startFlushDesc.getFlushSequenceNumber()); assertTrue(secondaryRegion.getMemstoreSize() > 0); verifyData(secondaryRegion,0,numRows,cq,families); LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion,0,numRows,cq,families); LOG.info("-- Verifying edits from primary."); verifyData(primaryRegion,0,numRows,cq,families); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Scenario: only the flush START marker is replayed into the secondary (the
// COMMIT is deliberately skipped), leaving a prepared-but-uncommitted flush.
// Closing the region must not hang on that outstanding snapshot, and the
// global memstore accounting must drop to 0 after close. Timeout guards
// against the regression (an indefinite block on close).
/** * Tests a case where we replay only a flush start marker, then the region is closed. This region * should not block indefinitely */ @Test(timeout=60000) public void testOnlyReplayingFlushStartDoesNotHoldUpRegionClose() throws IOException { int start=0; LOG.info("-- Writing some data to primary from " + start + " to "+ (start + 100)); putData(primaryRegion,Durability.SYNC_WAL,start,100,cq,families); LOG.info("-- Flushing primary, creating 3 files for 3 stores"); primaryRegion.flush(true); reader=createWALReaderForPrimary(); LOG.info("-- Replaying edits and flush events in secondary"); while (true) { WAL.Entry entry=reader.next(); if (entry == null) { break; } FlushDescriptor flushDesc=WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { if (flushDesc.getAction() == FlushAction.START_FLUSH) { LOG.info("-- Replaying flush start in secondary"); secondaryRegion.replayWALFlushStartMarker(flushDesc); } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) { LOG.info("-- NOT Replaying flush commit in secondary"); } } else { replayEdit(secondaryRegion,entry); } } assertTrue(rss.getRegionServerAccounting().getGlobalMemstoreSize() > 0); secondaryRegion.close(); assertEquals(0,rss.getRegionServerAccounting().getGlobalMemstoreSize()); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Scenario: the secondary has a prepared flush (START replayed, COMMIT never
// seen) when a region-open event marker arrives. Replaying the LAST open
// event must supersede the pending flush: store files appear (2 per store),
// snapshot and memstore sizes drop to 0, and the stale PrepareFlushResult is
// cleared, with all rows readable on both replicas afterwards.
/** * Tests the case where we replay a region open event after a flush start but before receiving * flush commit */ @Test public void testReplayRegionOpenEventAfterFlushStart() throws IOException { putDataWithFlushes(primaryRegion,100,100,100); int numRows=200; primaryRegion.close(); primaryRegion=HRegion.openHRegion(rootDir,primaryHri,htd,walPrimary,CONF,rss,null); reader=createWALReaderForPrimary(); List regionEvents=Lists.newArrayList(); LOG.info("-- Replaying edits and region events in secondary"); while (true) { WAL.Entry entry=reader.next(); if (entry == null) { break; } FlushDescriptor flushDesc=WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); RegionEventDescriptor regionEventDesc=WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { if (flushDesc.getAction() == FlushAction.START_FLUSH) { secondaryRegion.replayWALFlushStartMarker(flushDesc); } } else if (regionEventDesc != null) { regionEvents.add(regionEventDesc); } else { replayEdit(secondaryRegion,entry); } } verifyData(secondaryRegion,0,numRows,cq,families); assertEquals(3,regionEvents.size()); int expectedStoreFileCount=0; for ( Store s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount,s.getStorefilesCount()); } LOG.info("Testing replaying region open event " + regionEvents.get(2)); secondaryRegion.replayWALRegionEventMarker(regionEvents.get(2)); expectedStoreFileCount=2; for ( Store s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount,s.getStorefilesCount()); } Store store=secondaryRegion.getStore(Bytes.toBytes("cf1")); long newSnapshotSize=store.getSnapshotSize(); assertTrue(newSnapshotSize == 0); long newRegionMemstoreSize=secondaryRegion.getMemstoreSize(); assertTrue(newRegionMemstoreSize == 0); assertNull(secondaryRegion.getPrepareFlushResult()); LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion,0,numRows,cq,families); LOG.info("-- Verifying edits from primary."); 
verifyData(primaryRegion,0,numRows,cq,families); }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Replays the primary's WAL (edits, flush START/COMMIT markers and one
 * compaction marker for "cf1") into the secondary region and verifies at each
 * step that the secondary's memstore/store-file accounting tracks the markers:
 * a flush START shrinks the active memstore, a flush COMMIT adds a store file
 * and shrinks the flushable size, and the compaction marker collapses cf1 to a
 * single store file. Finally verifies the primary's files were not deleted.
 * Fix over previous revision: removed a stray empty statement (";") after the
 * replayEdit(...) assignment.
 */
@Test
public void testReplayFlushesAndCompactions() throws IOException {
  putDataWithFlushes(primaryRegion, 100, 300, 100);
  LOG.info("-- Compacting primary, only 1 store");
  primaryRegion.compactStore(Bytes.toBytes("cf1"), NoLimitThroughputController.INSTANCE);
  reader = createWALReaderForPrimary();
  LOG.info("-- Replaying edits and flush events in secondary");
  int lastReplayed = 0;
  int expectedStoreFileCount = 0;
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    // Markers ride in the first cell of the edit; at most one of these is set.
    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    CompactionDescriptor compactionDesc = WALEdit.getCompaction(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      // Snapshot the sizes before replaying the marker so we can assert deltas.
      verifyData(secondaryRegion, 0, lastReplayed, cq, families);
      Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
      long storeMemstoreSize = store.getMemStoreSize();
      long regionMemstoreSize = secondaryRegion.getMemstoreSize();
      long storeFlushableSize = store.getFlushableSize();
      long storeSize = store.getSize();
      long storeSizeUncompressed = store.getStoreSizeUncompressed();
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        LOG.info("-- Replaying flush start in secondary");
        PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc);
        assertNull(result.result);
        assertEquals(result.flushOpSeqId, flushDesc.getFlushSequenceNumber());
        // START_FLUSH moves the active memstore into the snapshot.
        long newStoreMemstoreSize = store.getMemStoreSize();
        LOG.info("Memstore size reduced by:"
            + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize));
        assertTrue(storeMemstoreSize > newStoreMemstoreSize);
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        LOG.info("-- Replaying flush commit in secondary");
        secondaryRegion.replayWALFlushCommitMarker(flushDesc);
        // COMMIT_FLUSH materializes one more store file per store ...
        expectedStoreFileCount++;
        for (Store s : secondaryRegion.getStores()) {
          assertEquals(expectedStoreFileCount, s.getStorefilesCount());
        }
        // ... and drops the snapshot, shrinking flushable and memstore sizes
        // while growing the on-disk store size.
        long newFlushableSize = store.getFlushableSize();
        assertTrue(storeFlushableSize > newFlushableSize);
        long newRegionMemstoreSize = secondaryRegion.getMemstoreSize();
        assertTrue(regionMemstoreSize > newRegionMemstoreSize);
        assertTrue(store.getSize() > storeSize);
        assertTrue(store.getStoreSizeUncompressed() > storeSizeUncompressed);
        assertEquals(store.getSize(), store.getStorefilesSize());
      }
      // Data replayed so far must remain readable after either marker.
      verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
    } else if (compactionDesc != null) {
      secondaryRegion.replayWALCompactionMarker(compactionDesc, true, false, Long.MAX_VALUE);
      // Only cf1 was compacted on the primary: it collapses to one file,
      // the other stores keep their per-flush file counts.
      for (Store store : secondaryRegion.getStores()) {
        if (store.getColumnFamilyName().equals("cf1")) {
          assertEquals(1, store.getStorefilesCount());
        } else {
          assertEquals(expectedStoreFileCount, store.getStorefilesCount());
        }
      }
    } else {
      lastReplayed = replayEdit(secondaryRegion, entry);
    }
  }
  assertEquals(400 - 1, lastReplayed);
  LOG.info("-- Verifying edits from secondary");
  verifyData(secondaryRegion, 0, 400, cq, families);
  LOG.info("-- Verifying edits from primary. Ensuring that files are not deleted");
  verifyData(primaryRegion, 0, lastReplayed, cq, families);
  for (Store store : primaryRegion.getStores()) {
    if (store.getColumnFamilyName().equals("cf1")) {
      assertEquals(1, store.getStorefilesCount());
    } else {
      assertEquals(expectedStoreFileCount, store.getStorefilesCount());
    }
  }
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Flow: write+flush on the primary, reopen it to generate region-event markers,
// then replay into the secondary ONLY the region events (flush markers and data
// edits are intentionally skipped, so the secondary memstore stays empty).
// Replaying open events 0 and 1 must add no files; replaying the final open
// event (get(2), from the reopen) must pick up exactly one store file per store
// with zero flushable/memstore size and no prepared flush, after which the data
// is readable from the files alone.
/** * Tests replaying region open markers from primary region. Checks whether the files are picked up */ @Test public void testReplayRegionOpenEvent() throws IOException { putDataWithFlushes(primaryRegion,100,0,100); int numRows=100; primaryRegion.close(); primaryRegion=HRegion.openHRegion(rootDir,primaryHri,htd,walPrimary,CONF,rss,null); reader=createWALReaderForPrimary(); List regionEvents=Lists.newArrayList(); LOG.info("-- Replaying edits and region events in secondary"); while (true) { WAL.Entry entry=reader.next(); if (entry == null) { break; } FlushDescriptor flushDesc=WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); RegionEventDescriptor regionEventDesc=WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { } else if (regionEventDesc != null) { regionEvents.add(regionEventDesc); } else { } } assertEquals(3,regionEvents.size()); secondaryRegion.replayWALRegionEventMarker(regionEvents.get(0)); secondaryRegion.replayWALRegionEventMarker(regionEvents.get(1)); int expectedStoreFileCount=0; for ( Store s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount,s.getStorefilesCount()); } long regionMemstoreSize=secondaryRegion.getMemstoreSize(); assertTrue(regionMemstoreSize == 0); LOG.info("Testing replaying region open event " + regionEvents.get(2)); secondaryRegion.replayWALRegionEventMarker(regionEvents.get(2)); expectedStoreFileCount++; for ( Store s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount,s.getStorefilesCount()); } Store store=secondaryRegion.getStore(Bytes.toBytes("cf1")); long newFlushableSize=store.getFlushableSize(); assertTrue(newFlushableSize == 0); long newRegionMemstoreSize=secondaryRegion.getMemstoreSize(); assertTrue(newRegionMemstoreSize == 0); assertNull(secondaryRegion.getPrepareFlushResult()); LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion,0,numRows,cq,families); LOG.info("-- Verifying edits from primary."); 
verifyData(primaryRegion,0,numRows,cq,families); }

IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the case where we prepare a flush with some seqId and we receive a
 * flush commit marker whose sequence number is LESS than the previously
 * replayed flush start marker. Replay skips the first START marker but keeps
 * its paired COMMIT marker, then replays a later START -- so the retained
 * commit is behind the active prepared flush. Replaying that stale commit must
 * still add the store file it describes, but must NOT drop the prepared flush
 * or change the region memstore size.
 * Fix over previous revision: removed a leftover debug
 * System.out.println(lastReplayed) from the replay loop.
 */
@Test
public void testReplayFlushCommitMarkerSmallerThanFlushStartMarker() throws IOException {
  putDataWithFlushes(primaryRegion, 100, 200, 100);
  int numRows = 300;
  reader = createWALReaderForPrimary();
  LOG.info("-- Replaying edits and flush events in secondary");
  FlushDescriptor startFlushDesc = null;
  FlushDescriptor commitFlushDesc = null;
  int lastReplayed = 0;
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        // Skip replaying the very first START; only later STARTs are replayed.
        if (startFlushDesc == null) {
          startFlushDesc = flushDesc;
        } else {
          LOG.info("-- Replaying flush start in secondary");
          startFlushDesc = flushDesc;
          PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc);
          assertNull(result.result);
        }
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        // Keep only the first COMMIT (it pairs with the skipped START above);
        // it is replayed manually after the loop.
        if (commitFlushDesc == null) {
          commitFlushDesc = flushDesc;
        }
      }
      verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families);
    } else {
      lastReplayed = replayEdit(secondaryRegion, entry);
    }
  }
  verifyData(secondaryRegion, 0, numRows, cq, families);
  int expectedStoreFileCount = 0;
  for (Store s : secondaryRegion.getStores()) {
    assertEquals(expectedStoreFileCount, s.getStorefilesCount());
  }
  long regionMemstoreSize = secondaryRegion.getMemstoreSize();
  LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START"
      + startFlushDesc);
  // Precondition for the scenario under test: commit seq < prepared start seq.
  assertTrue(commitFlushDesc.getFlushSequenceNumber() < startFlushDesc.getFlushSequenceNumber());
  LOG.info("-- Replaying flush commit in secondary" + commitFlushDesc);
  secondaryRegion.replayWALFlushCommitMarker(commitFlushDesc);
  // The stale commit still picks up the file it describes ...
  expectedStoreFileCount++;
  for (Store s : secondaryRegion.getStores()) {
    assertEquals(expectedStoreFileCount, s.getStorefilesCount());
  }
  // ... but the prepared flush snapshot and memstore accounting are untouched.
  Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
  long newFlushableSize = store.getFlushableSize();
  assertTrue(newFlushableSize > 0);
  long newRegionMemstoreSize = secondaryRegion.getMemstoreSize();
  assertEquals(regionMemstoreSize, newRegionMemstoreSize);
  assertNotNull(secondaryRegion.getPrepareFlushResult());
  LOG.info("-- Verifying edits from secondary");
  verifyData(secondaryRegion, 0, numRows, cq, families);
  LOG.info("-- Verifying edits from primary.");
  verifyData(primaryRegion, 0, numRows, cq, families);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testRegionReplicaSecondaryCannotFlush() throws IOException { putDataByReplay(secondaryRegion,0,1000,cq,families); verifyData(secondaryRegion,0,1000,cq,families); FlushResultImpl flush=(FlushResultImpl)secondaryRegion.flush(true); assertEquals(flush.result,FlushResultImpl.Result.CANNOT_FLUSH); verifyData(secondaryRegion,0,1000,cq,families); Map> files=secondaryRegion.close(false); for ( List f : files.values()) { assertTrue(f.isEmpty()); } }

Class: org.apache.hadoop.hbase.regionserver.TestHeapMemoryManager

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
// Heap-memory tuner steady-state behavior: with both memstore and block cache
// at 80% usage, ABOVE_LOWER_MARK flushes plus evictions (mixed pressure) must
// leave the sizes untouched after one tuner period; a subsequent burst of
// ABOVE_HIGHER_MARK (blocked) flushes must grow the memstore by one max step
// and shrink the block cache by the same step.
// NOTE(review): relies on Thread.sleep(1500) exceeding the 1000ms tuner
// period -- timing-sensitive; a sibling test uses waitForTune instead.
@Test public void testBlockedFlushesIncreaseMemstoreInSteadyState() throws Exception { BlockCacheStub blockCache=new BlockCacheStub((long)(maxHeapSize * 0.4)); MemstoreFlusherStub memStoreFlusher=new MemstoreFlusherStub((long)(maxHeapSize * 0.4)); RegionServerAccountingStub regionServerAccounting=new RegionServerAccountingStub(); blockCache.setTestBlockSize(0); regionServerAccounting.setTestMemstoreSize((long)(maxHeapSize * 0.4 * 0.8)); blockCache.setTestBlockSize((long)(maxHeapSize * 0.4 * 0.8)); Configuration conf=HBaseConfiguration.create(); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY,0.75f); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY,0.10f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY,0.7f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY,0.05f); conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD,1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE,0); HeapMemoryManager heapMemoryManager=new HeapMemoryManager(blockCache,memStoreFlusher,new RegionServerStub(conf),regionServerAccounting); long oldMemstoreHeapSize=memStoreFlusher.memstoreSize; long oldBlockCacheSize=blockCache.maxSize; final ChoreService choreService=new ChoreService("TEST_SERVER_NAME"); heapMemoryManager.start(choreService); memStoreFlusher.flushType=FlushType.ABOVE_LOWER_MARK; memStoreFlusher.requestFlush(null,false); memStoreFlusher.requestFlush(null,false); memStoreFlusher.requestFlush(null,false); blockCache.evictBlock(null); blockCache.evictBlock(null); Thread.sleep(1500); assertEquals(oldMemstoreHeapSize,memStoreFlusher.memstoreSize); assertEquals(oldBlockCacheSize,blockCache.maxSize); memStoreFlusher.flushType=FlushType.ABOVE_HIGHER_MARK; memStoreFlusher.requestFlush(null,false); blockCache.evictBlock(null); blockCache.evictBlock(null); blockCache.evictBlock(null); blockCache.evictBlock(null); Thread.sleep(1500); 
assertHeapSpaceDelta(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE,oldMemstoreHeapSize,memStoreFlusher.memstoreSize); assertHeapSpaceDelta(-(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE),oldBlockCacheSize,blockCache.maxSize); }

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
// Heap-memory tuner under write-heavy load: first tuner period sees mixed
// flushes+eviction, so sizes must not change; a second period with only
// ABOVE_LOWER_MARK flushes (writes dominating reads) must grow the memstore
// by one max step and shrink the block cache by the same step.
// Uses waitForTune (rather than a fixed sleep) for the final tuning round.
@Test public void testWhenClusterIsHavingMoreWritesThanReads() throws Exception { BlockCacheStub blockCache=new BlockCacheStub((long)(maxHeapSize * 0.4)); MemstoreFlusherStub memStoreFlusher=new MemstoreFlusherStub((long)(maxHeapSize * 0.4)); RegionServerAccountingStub regionServerAccounting=new RegionServerAccountingStub(); blockCache.setTestBlockSize(0); regionServerAccounting.setTestMemstoreSize((long)(maxHeapSize * 0.4 * 0.8)); blockCache.setTestBlockSize((long)(maxHeapSize * 0.4 * 0.8)); Configuration conf=HBaseConfiguration.create(); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY,0.75f); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY,0.10f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY,0.7f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY,0.05f); conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD,1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE,0); HeapMemoryManager heapMemoryManager=new HeapMemoryManager(blockCache,memStoreFlusher,new RegionServerStub(conf),regionServerAccounting); long oldMemstoreHeapSize=memStoreFlusher.memstoreSize; long oldBlockCacheSize=blockCache.maxSize; final ChoreService choreService=new ChoreService("TEST_SERVER_NAME"); heapMemoryManager.start(choreService); memStoreFlusher.flushType=FlushType.ABOVE_LOWER_MARK; memStoreFlusher.requestFlush(null,false); memStoreFlusher.requestFlush(null,false); memStoreFlusher.requestFlush(null,false); blockCache.evictBlock(null); Thread.sleep(1500); assertEquals(oldMemstoreHeapSize,memStoreFlusher.memstoreSize); assertEquals(oldBlockCacheSize,blockCache.maxSize); memStoreFlusher.flushType=FlushType.ABOVE_LOWER_MARK; memStoreFlusher.requestFlush(null,false); memStoreFlusher.requestFlush(null,false); memStoreFlusher.requestFlush(null,false); waitForTune(memStoreFlusher,memStoreFlusher.memstoreSize); 
assertHeapSpaceDelta(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE,oldMemstoreHeapSize,memStoreFlusher.memstoreSize); assertHeapSpaceDelta(-(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE),oldBlockCacheSize,blockCache.maxSize); }

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
@Test public void testWhenClusterIsWriteHeavyWithEmptyMemstore() throws Exception { BlockCacheStub blockCache=new BlockCacheStub((long)(maxHeapSize * 0.4)); MemstoreFlusherStub memStoreFlusher=new MemstoreFlusherStub((long)(maxHeapSize * 0.4)); RegionServerAccountingStub regionServerAccounting=new RegionServerAccountingStub(); blockCache.setTestBlockSize(0); regionServerAccounting.setTestMemstoreSize(0); Configuration conf=HBaseConfiguration.create(); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY,0.75f); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY,0.10f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY,0.7f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY,0.05f); conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD,1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE,0); HeapMemoryManager heapMemoryManager=new HeapMemoryManager(blockCache,memStoreFlusher,new RegionServerStub(conf),regionServerAccounting); long oldMemstoreHeapSize=memStoreFlusher.memstoreSize; long oldBlockCacheSize=blockCache.maxSize; final ChoreService choreService=new ChoreService("TEST_SERVER_NAME"); heapMemoryManager.start(choreService); memStoreFlusher.flushType=FlushType.ABOVE_HIGHER_MARK; memStoreFlusher.requestFlush(null,false); memStoreFlusher.requestFlush(null,false); memStoreFlusher.requestFlush(null,false); memStoreFlusher.flushType=FlushType.ABOVE_LOWER_MARK; memStoreFlusher.requestFlush(null,false); Thread.sleep(1500); assertEquals(oldMemstoreHeapSize,memStoreFlusher.memstoreSize); assertEquals(oldBlockCacheSize,blockCache.maxSize); }

InternalCallVerifier BooleanVerifier 
@Test public void testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven() throws Exception { Configuration conf=HBaseConfiguration.create(); conf.setFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY,0.02f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY,0.75f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY,0.03f); HeapMemoryManager manager=new HeapMemoryManager(new BlockCacheStub(0),new MemstoreFlusherStub(0),new RegionServerStub(conf),new RegionServerAccountingStub()); assertFalse(manager.isTunerOn()); }

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
@Test public void testWhenClusterIsReadHeavyWithEmptyBlockCache() throws Exception { BlockCacheStub blockCache=new BlockCacheStub((long)(maxHeapSize * 0.4)); MemstoreFlusherStub memStoreFlusher=new MemstoreFlusherStub((long)(maxHeapSize * 0.4)); RegionServerAccountingStub regionServerAccounting=new RegionServerAccountingStub(); blockCache.setTestBlockSize(0); regionServerAccounting.setTestMemstoreSize(0); Configuration conf=HBaseConfiguration.create(); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY,0.75f); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY,0.10f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY,0.7f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY,0.05f); conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD,1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE,0); HeapMemoryManager heapMemoryManager=new HeapMemoryManager(blockCache,memStoreFlusher,new RegionServerStub(conf),regionServerAccounting); long oldMemstoreHeapSize=memStoreFlusher.memstoreSize; long oldBlockCacheSize=blockCache.maxSize; final ChoreService choreService=new ChoreService("TEST_SERVER_NAME"); heapMemoryManager.start(choreService); blockCache.evictBlock(null); blockCache.evictBlock(null); blockCache.evictBlock(null); Thread.sleep(1500); assertEquals(oldMemstoreHeapSize,memStoreFlusher.memstoreSize); assertEquals(oldBlockCacheSize,blockCache.maxSize); }

InternalCallVerifier BooleanVerifier 
@Test public void testAutoTunerShouldBeOffWhenMaxMinRangesForBlockCacheIsNotGiven() throws Exception { Configuration conf=HBaseConfiguration.create(); conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,0.02f); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY,0.75f); conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY,0.03f); HeapMemoryManager manager=new HeapMemoryManager(new BlockCacheStub(0),new MemstoreFlusherStub(0),new RegionServerStub(conf),new RegionServerAccountingStub()); assertFalse(manager.isTunerOn()); }

Class: org.apache.hadoop.hbase.regionserver.TestKeepDeletes

InternalCallVerifier EqualityVerifier 
/** * Test keeping deleted rows together with min versions set * @throws Exception */ @Test public void testWithMinVersions() throws Exception { HTableDescriptor htd=hbu.createTableDescriptor(name.getMethodName(),3,1000,1,KeepDeletedCells.TRUE); Region region=hbu.createLocalHRegion(htd,null,null); long ts=EnvironmentEdgeManager.currentTime() - 2000; Put p=new Put(T1,ts); p.addColumn(c0,c0,T3); region.put(p); p=new Put(T1,ts - 1); p.addColumn(c0,c0,T2); region.put(p); p=new Put(T1,ts - 3); p.addColumn(c0,c0,T1); region.put(p); p=new Put(T1,ts - 4); p.addColumn(c0,c0,T0); region.put(p); Delete d=new Delete(T1,ts - 1); region.delete(d); d=new Delete(T1,ts - 2); d.addColumns(c0,c0,ts - 1); region.delete(d); Get g=new Get(T1); g.setMaxVersions(); g.setTimeRange(0L,ts - 2); Result r=region.get(g); checkResult(r,c0,c0,T1,T0); assertEquals(4,countDeleteMarkers(region)); region.flush(true); assertEquals(4,countDeleteMarkers(region)); r=region.get(g); checkResult(r,c0,c0,T1); p=new Put(T1,ts + 1); p.addColumn(c0,c0,T4); region.put(p); region.flush(true); assertEquals(4,countDeleteMarkers(region)); r=region.get(g); checkResult(r,c0,c0,T1); p=new Put(T1,ts + 2); p.addColumn(c0,c0,T5); region.put(p); region.flush(true); region.compact(true); assertEquals(2,countDeleteMarkers(region)); region.compact(true); assertEquals(0,countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Make sure that deleted rows are retained. * Family delete markers are deleted. * Column Delete markers are versioned * Time range scan of deleted rows are possible */ @Test public void testBasicScenario() throws Exception { HTableDescriptor htd=hbu.createTableDescriptor(name.getMethodName(),0,3,HConstants.FOREVER,KeepDeletedCells.TRUE); Region region=hbu.createLocalHRegion(htd,null,null); long ts=EnvironmentEdgeManager.currentTime(); Put p=new Put(T1,ts); p.addColumn(c0,c0,T1); region.put(p); p=new Put(T1,ts + 1); p.addColumn(c0,c0,T2); region.put(p); p=new Put(T1,ts + 2); p.addColumn(c0,c0,T3); region.put(p); p=new Put(T1,ts + 4); p.addColumn(c0,c0,T4); region.put(p); Delete d=new Delete(T1,ts + 2); region.delete(d); assertEquals(3,countDeleteMarkers(region)); Get g=new Get(T1); g.setMaxVersions(); g.setTimeRange(0L,ts + 2); Result r=region.get(g); checkResult(r,c0,c0,T2,T1); region.flush(true); r=region.get(g); checkResult(r,c0,c0,T2); region.compact(true); region.compact(true); assertEquals(1,countDeleteMarkers(region)); r=region.get(g); checkResult(r,c0,c0,T2); g.setTimeRange(0L,ts + 4); r=region.get(g); assertTrue(r.isEmpty()); p=new Put(T1,ts + 5); p.addColumn(c0,c0,T5); region.put(p); p=new Put(T1,ts + 6); p.addColumn(c0,c0,T6); region.put(p); p=new Put(T1,ts); p.addColumn(c0,c0,T1); region.put(p); r=region.get(g); assertTrue(r.isEmpty()); region.flush(true); region.compact(true); region.compact(true); region.put(p); r=region.get(g); checkResult(r,c0,c0,T1); assertEquals(0,countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); }

InternalCallVerifier EqualityVerifier 
/** * Test delete marker removal from store files. */ @Test public void testDeleteMarkerExpiration() throws Exception { HTableDescriptor htd=hbu.createTableDescriptor(name.getMethodName(),0,1,HConstants.FOREVER,KeepDeletedCells.TRUE); Region region=hbu.createLocalHRegion(htd,null,null); long ts=EnvironmentEdgeManager.currentTime(); Put p=new Put(T1,ts); p.addColumn(c0,c0,T1); region.put(p); p=new Put(T1,ts - 10); p.addColumn(c1,c0,T1); region.put(p); Delete d=new Delete(T1,ts); d.addColumns(c0,c0,ts); region.delete(d); d=new Delete(T1,ts); d.addFamily(c0,ts); region.delete(d); d=new Delete(T1,ts); d.addColumn(c0,c0,ts + 1); region.delete(d); d=new Delete(T1,ts); d.addColumn(c0,c0,ts + 2); region.delete(d); assertEquals(4,countDeleteMarkers(region)); region.flush(true); assertEquals(4,countDeleteMarkers(region)); region.compact(false); assertEquals(4,countDeleteMarkers(region)); p=new Put(T1,ts + 3); p.addColumn(c0,c0,T1); region.put(p); region.flush(true); region.compact(true); assertEquals(4,countDeleteMarkers(region)); region.compact(true); assertEquals(0,countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); }

InternalCallVerifier EqualityVerifier 
/** * Verify that delete markers are removed from an otherwise empty store. */ @Test public void testDeleteMarkerExpirationEmptyStore() throws Exception { HTableDescriptor htd=hbu.createTableDescriptor(name.getMethodName(),0,1,HConstants.FOREVER,KeepDeletedCells.TRUE); Region region=hbu.createLocalHRegion(htd,null,null); long ts=EnvironmentEdgeManager.currentTime(); Delete d=new Delete(T1,ts); d.addColumns(c0,c0,ts); region.delete(d); d=new Delete(T1,ts); d.addFamily(c0); region.delete(d); d=new Delete(T1,ts); d.addColumn(c0,c0,ts + 1); region.delete(d); d=new Delete(T1,ts); d.addColumn(c0,c0,ts + 2); region.delete(d); assertEquals(4,countDeleteMarkers(region)); region.flush(true); assertEquals(4,countDeleteMarkers(region)); region.compact(false); assertEquals(4,countDeleteMarkers(region)); region.compact(true); assertEquals(0,countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); }

InternalCallVerifier EqualityVerifier 
/** * Test delete marker removal from store files. */ @Test public void testWithOldRow() throws Exception { HTableDescriptor htd=hbu.createTableDescriptor(name.getMethodName(),0,1,HConstants.FOREVER,KeepDeletedCells.TRUE); Region region=hbu.createLocalHRegion(htd,null,null); long ts=EnvironmentEdgeManager.currentTime(); Put p=new Put(T1,ts); p.addColumn(c0,c0,T1); region.put(p); p=new Put(T2,ts - 10); p.addColumn(c0,c0,T1); region.put(p); Delete d=new Delete(T1,ts); d.addColumns(c0,c0,ts); region.delete(d); d=new Delete(T1,ts); d.addFamily(c0,ts); region.delete(d); d=new Delete(T1,ts); d.addColumn(c0,c0,ts + 1); region.delete(d); d=new Delete(T1,ts); d.addColumn(c0,c0,ts + 2); region.delete(d); assertEquals(4,countDeleteMarkers(region)); region.flush(true); assertEquals(4,countDeleteMarkers(region)); region.compact(false); assertEquals(4,countDeleteMarkers(region)); p=new Put(T1,ts + 3); p.addColumn(c0,c0,T1); region.put(p); region.flush(true); region.compact(true); assertEquals(4,countDeleteMarkers(region)); region.compact(true); assertEquals(4,countDeleteMarkers(region)); p=new Put(T1,ts + 4); p.addColumn(c0,c0,T1); region.put(p); region.compact(true); assertEquals(1,countDeleteMarkers(region)); region.compact(true); assertEquals(1,countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); }

InternalCallVerifier EqualityVerifier 
/** * Test keeping deleted rows together with min versions set * @throws Exception */ @Test public void testWithTTL() throws Exception { HTableDescriptor htd=hbu.createTableDescriptor(name.getMethodName(),1,1000,1,KeepDeletedCells.TTL); Region region=hbu.createLocalHRegion(htd,null,null); long ts=EnvironmentEdgeManager.currentTime() - 2000; Put p=new Put(T1,ts); p.addColumn(c0,c0,T3); region.put(p); p=new Put(T2,ts - 10); p.addColumn(c0,c0,T1); region.put(p); checkGet(region,T1,c0,c0,ts + 1,T3); Delete d=new Delete(T1,ts + 2); region.delete(d); checkGet(region,T1,c0,c0,ts + 1,T3); assertEquals(3,countDeleteMarkers(region)); region.flush(true); assertEquals(3,countDeleteMarkers(region)); checkGet(region,T1,c0,c0,ts + 1); region.compact(true); assertEquals(0,countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); }

InternalCallVerifier EqualityVerifier 
/** * Verify that column/version delete makers are sorted * with their respective puts and removed correctly by * versioning (i.e. not relying on the store earliestPutTS). */ @Test public void testDeleteMarkerVersioning() throws Exception { HTableDescriptor htd=hbu.createTableDescriptor(name.getMethodName(),0,1,HConstants.FOREVER,KeepDeletedCells.TRUE); Region region=hbu.createLocalHRegion(htd,null,null); long ts=EnvironmentEdgeManager.currentTime(); Put p=new Put(T1,ts); p.addColumn(c0,c0,T1); region.put(p); p=new Put(T1,ts - 10); p.addColumn(c0,c1,T1); region.put(p); Delete d=new Delete(T1,ts); d.addColumns(c0,c0,ts); region.delete(d); d=new Delete(T1,ts + 1); d.addColumn(c0,c0,ts + 1); region.delete(d); d=new Delete(T1,ts + 3); d.addColumn(c0,c0,ts + 3); region.delete(d); region.flush(true); region.compact(true); region.compact(true); assertEquals(3,countDeleteMarkers(region)); p=new Put(T1,ts + 2); p.addColumn(c0,c0,T2); region.put(p); assertEquals(3,countDeleteMarkers(region)); p=new Put(T1,ts + 3); p.addColumn(c0,c0,T3); region.put(p); assertEquals(1,countDeleteMarkers(region)); region.flush(true); assertEquals(3,countDeleteMarkers(region)); region.compact(true); assertEquals(3,countDeleteMarkers(region)); p=new Put(T1,ts + 4); p.addColumn(c0,c0,T4); region.put(p); region.flush(true); assertEquals(1,countDeleteMarkers(region)); region.compact(true); region.compact(true); assertEquals(1,countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * basic verification of existing behavior */ @Test public void testWithoutKeepingDeletes() throws Exception { HTableDescriptor htd=hbu.createTableDescriptor(name.getMethodName(),0,3,HConstants.FOREVER,KeepDeletedCells.FALSE); Region region=hbu.createLocalHRegion(htd,null,null); long ts=EnvironmentEdgeManager.currentTime(); Put p=new Put(T1,ts); p.addColumn(c0,c0,T1); region.put(p); Get gOne=new Get(T1); gOne.setMaxVersions(); gOne.setTimeRange(0L,ts + 1); Result rOne=region.get(gOne); assertFalse(rOne.isEmpty()); Delete d=new Delete(T1,ts + 2); d.addColumn(c0,c0,ts); region.delete(d); Get g=new Get(T1); g.setMaxVersions(); g.setTimeRange(0L,ts + 1); Result r=region.get(g); assertTrue(r.isEmpty()); Scan s=new Scan(); s.setMaxVersions(); s.setTimeRange(0L,ts + 1); InternalScanner scanner=region.getScanner(s); List kvs=new ArrayList(); while (scanner.next(kvs)) ; assertTrue(kvs.isEmpty()); region.flush(true); region.compact(false); assertEquals(1,countDeleteMarkers(region)); region.compact(true); assertEquals(0,countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); }

Class: org.apache.hadoop.hbase.regionserver.TestKeyValueHeap

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testScannerLeak() throws IOException { List l1=new ArrayList(); l1.add(new KeyValue(row1,fam1,col5,data)); l1.add(new KeyValue(row2,fam1,col1,data)); l1.add(new KeyValue(row2,fam1,col2,data)); Scanner s1=new Scanner(l1); scanners.add(s1); List l2=new ArrayList(); l2.add(new KeyValue(row1,fam1,col1,data)); l2.add(new KeyValue(row1,fam1,col2,data)); Scanner s2=new Scanner(l2); scanners.add(s2); List l3=new ArrayList(); l3.add(new KeyValue(row1,fam1,col3,data)); l3.add(new KeyValue(row1,fam1,col4,data)); l3.add(new KeyValue(row1,fam2,col1,data)); l3.add(new KeyValue(row1,fam2,col2,data)); l3.add(new KeyValue(row2,fam1,col3,data)); Scanner s3=new Scanner(l3); scanners.add(s3); List l4=new ArrayList(); Scanner s4=new Scanner(l4); scanners.add(s4); KeyValueHeap kvh=new KeyValueHeap(scanners,CellComparator.COMPARATOR); while (kvh.next() != null) ; assertEquals(4,kvh.scannersForDelayedClose.size()); assertTrue(kvh.scannersForDelayedClose.contains(s1)); assertTrue(kvh.scannersForDelayedClose.contains(s2)); assertTrue(kvh.scannersForDelayedClose.contains(s3)); assertTrue(kvh.scannersForDelayedClose.contains(s4)); kvh.close(); for ( KeyValueScanner scanner : scanners) { assertTrue(((Scanner)scanner).isClosed()); } }

Class: org.apache.hadoop.hbase.regionserver.TestMajorCompaction

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Test for HBASE-5920 */ @Test public void testUserMajorCompactionRequest() throws IOException { Store store=r.getStore(COLUMN_FAMILY); createStoreFile(r); for (int i=0; i < MAX_FILES_TO_COMPACT + 1; i++) { createStoreFile(r); } store.triggerMajorCompaction(); CompactionRequest request=store.requestCompaction(Store.PRIORITY_USER,null).getRequest(); assertNotNull("Expected to receive a compaction request",request); assertEquals("User-requested major compaction should always occur, even if there are too many store files",true,request.isMajor()); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
@Test public void testTimeBasedMajorCompaction() throws Exception { int delay=10 * 1000; float jitterPct=0.20f; conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD,delay); conf.setFloat("hbase.hregion.majorcompaction.jitter",jitterPct); HStore s=((HStore)r.getStore(COLUMN_FAMILY)); s.storeEngine.getCompactionPolicy().setConf(conf); try { createStoreFile(r); createStoreFile(r); r.compact(true); createStoreFile(r); r.compact(false); assertEquals(2,s.getStorefilesCount()); RatioBasedCompactionPolicy c=(RatioBasedCompactionPolicy)s.storeEngine.getCompactionPolicy(); Collection storeFiles=s.getStorefiles(); long mcTime=c.getNextMajorCompactTime(storeFiles); for (int i=0; i < 10; ++i) { assertEquals(mcTime,c.getNextMajorCompactTime(storeFiles)); } long jitter=Math.round(delay * jitterPct); assertTrue(delay - jitter <= mcTime && mcTime <= delay + jitter); Thread.sleep(mcTime); r.compact(false); assertEquals(1,s.getStorefilesCount()); } finally { conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD,1000 * 60 * 60* 24); conf.setFloat("hbase.hregion.majorcompaction.jitter",0.20F); createStoreFile(r); r.compact(true); assertEquals(1,s.getStorefilesCount()); } }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Test for HBASE-5920 - Test user requested major compactions always occurring */ @Test public void testNonUserMajorCompactionRequest() throws Exception { Store store=r.getStore(COLUMN_FAMILY); createStoreFile(r); for (int i=0; i < MAX_FILES_TO_COMPACT + 1; i++) { createStoreFile(r); } store.triggerMajorCompaction(); CompactionRequest request=store.requestCompaction(Store.NO_PRIORITY,null).getRequest(); assertNotNull("Expected to receive a compaction request",request); assertEquals("System-requested major compaction should not occur if there are too many store files",false,request.isMajor()); }

Class: org.apache.hadoop.hbase.regionserver.TestMasterAddressTracker

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Looking up the info port of an unknown backup master must return 0. */
@Test
public void testNoBackups() throws Exception {
  final ServerName master = ServerName.valueOf("localhost", 1234, System.currentTimeMillis());
  final MasterAddressTracker tracker = setupMasterTracker(master, 1772);
  try {
    final ServerName missing =
        ServerName.valueOf("doesnotexist.example.com", 1234, System.currentTimeMillis());
    assertEquals("Should receive 0 for backup not found.", 0,
        tracker.getBackupMasterInfoPort(missing));
  } finally {
    assertTrue("Couldn't clean up master",
        MasterAddressTracker.deleteIfEquals(tracker.getWatcher(), master.toString()));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Unit test that uses ZooKeeper but does not use the master-side methods,
 * acting directly on ZK instead: the tracker must report the registered
 * master address and info port.
 * @throws Exception
 */
@Test
public void testMasterAddressTrackerFromZK() throws Exception {
  final int infoPort = 1235;
  final ServerName master = ServerName.valueOf("localhost", 1234, System.currentTimeMillis());
  final MasterAddressTracker tracker = setupMasterTracker(master, infoPort);
  try {
    assertTrue(tracker.hasMaster());
    final ServerName published = tracker.getMasterAddress();
    assertTrue(published.equals(master));
    assertEquals(infoPort, tracker.getMasterInfoPort());
  } finally {
    assertTrue("Couldn't clean up master",
        MasterAddressTracker.deleteIfEquals(tracker.getWatcher(), master.toString()));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** deleteIfEquals must refuse to delete when the stored value does not match. */
@Test
public void testDeleteIfEquals() throws Exception {
  final ServerName master = ServerName.valueOf("localhost", 1234, System.currentTimeMillis());
  final MasterAddressTracker tracker = setupMasterTracker(master, 1772);
  try {
    assertFalse("shouldn't have deleted wrong master server.",
        MasterAddressTracker.deleteIfEquals(tracker.getWatcher(), "some other string."));
  } finally {
    // Deleting with the matching value must succeed.
    assertTrue("Couldn't clean up master",
        MasterAddressTracker.deleteIfEquals(tracker.getWatcher(), master.toString()));
  }
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** With no active master registered, the tracker reports absence everywhere. */
@Test
public void testNoMaster() throws Exception {
  final MasterAddressTracker tracker = setupMasterTracker(null, 1772);
  assertFalse(tracker.hasMaster());
  assertNull("should get null master when none active.", tracker.getMasterAddress());
  assertEquals("Should receive 0 for backup not found.", 0, tracker.getMasterInfoPort());
}

Class: org.apache.hadoop.hbase.regionserver.TestMemStoreChunkPool

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Allocations within one chunk must be contiguous; closing a LAB returns its
 * chunks to the pool, and a fresh LAB takes one back out.
 */
@Test
public void testReusingChunks() {
  final Random rng = new Random();
  MemStoreLAB mslab = new HeapMemStoreLAB(conf);
  int nextOffset = 0;
  byte[] currentChunk = null;
  for (int iter = 0; iter < 100; iter++) {
    final int size = rng.nextInt(1000);
    final ByteRange alloc = mslab.allocateBytes(size);
    if (alloc.getBytes() != currentChunk) {
      // Rolled over to a fresh backing chunk; offsets restart at zero.
      nextOffset = 0;
      currentChunk = alloc.getBytes();
    }
    assertEquals(nextOffset, alloc.getOffset());
    assertTrue("Allocation overruns buffer",
        alloc.getOffset() + size <= alloc.getBytes().length);
    nextOffset += size;
  }
  // Closing the LAB should hand its chunks back to the pool...
  mslab.close();
  final int pooled = chunkPool.getPoolSize();
  assertTrue(pooled > 0);
  // ...and a new LAB should reuse one of them on its first allocation.
  mslab = new HeapMemStoreLAB(conf);
  mslab.allocateBytes(1000);
  assertEquals(pooled - 1, chunkPool.getPoolSize());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Clearing a memstore snapshot must return its chunks to the pool. */
@Test
public void testPuttingBackChunksAfterFlushing() throws UnexpectedStateException {
  final byte[] row = Bytes.toBytes("testrow");
  final byte[] fam = Bytes.toBytes("testfamily");
  final byte[] qf1 = Bytes.toBytes("testqualifier1");
  final byte[] qf2 = Bytes.toBytes("testqualifier2");
  final byte[] qf3 = Bytes.toBytes("testqualifier3");
  final byte[] qf4 = Bytes.toBytes("testqualifier4");
  final byte[] qf5 = Bytes.toBytes("testqualifier5");
  final byte[] val = Bytes.toBytes("testval");

  final DefaultMemStore memstore = new DefaultMemStore();
  // Three cells in the active set, then snapshot them away.
  memstore.add(new KeyValue(row, fam, qf1, val));
  memstore.add(new KeyValue(row, fam, qf2, val));
  memstore.add(new KeyValue(row, fam, qf3, val));
  final MemStoreSnapshot snapshot = memstore.snapshot();
  assertEquals(3, memstore.snapshot.size());
  assertEquals(0, memstore.cellSet.size());

  // Two more cells land in the new active set.
  memstore.add(new KeyValue(row, fam, qf4, val));
  memstore.add(new KeyValue(row, fam, qf5, val));
  assertEquals(2, memstore.cellSet.size());

  // Dropping the snapshot must free its chunks back to the pool.
  memstore.clearSnapshot(snapshot.getId());
  final int pooled = chunkPool.getPoolSize();
  assertTrue(pooled > 0);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Chunks referenced by an open scanner must NOT be returned to the pool when
 * the snapshot is cleared; they become poolable only once every scanner over
 * them is closed.
 *
 * NOTE(review): the element type of {@code scanners} was lost in extraction
 * (raw {@code List}); restored as {@code List<KeyValueScanner>}, matching the
 * for-each below and DefaultMemStore.getScanners.
 */
@Test
public void testPuttingBackChunksWithOpeningScanner() throws UnexpectedStateException {
  byte[] row = Bytes.toBytes("testrow");
  byte[] fam = Bytes.toBytes("testfamily");
  byte[] qf1 = Bytes.toBytes("testqualifier1");
  byte[] qf2 = Bytes.toBytes("testqualifier2");
  byte[] qf3 = Bytes.toBytes("testqualifier3");
  byte[] qf4 = Bytes.toBytes("testqualifier4");
  byte[] qf5 = Bytes.toBytes("testqualifier5");
  byte[] qf6 = Bytes.toBytes("testqualifier6");
  byte[] qf7 = Bytes.toBytes("testqualifier7");
  byte[] val = Bytes.toBytes("testval");

  DefaultMemStore memstore = new DefaultMemStore();
  // Three cells into the active set, then snapshot.
  memstore.add(new KeyValue(row, fam, qf1, val));
  memstore.add(new KeyValue(row, fam, qf2, val));
  memstore.add(new KeyValue(row, fam, qf3, val));
  MemStoreSnapshot snapshot = memstore.snapshot();
  assertEquals(3, memstore.snapshot.size());
  assertEquals(0, memstore.cellSet.size());

  memstore.add(new KeyValue(row, fam, qf4, val));
  memstore.add(new KeyValue(row, fam, qf5, val));
  assertEquals(2, memstore.cellSet.size());

  // Open scanners BEFORE clearing the snapshot: its chunks must stay pinned.
  List<KeyValueScanner> scanners = memstore.getScanners(0);
  memstore.clearSnapshot(snapshot.getId());
  assertTrue(chunkPool.getPoolSize() == 0);

  // Closing the scanners releases the chunks into the pool.
  for (KeyValueScanner scanner : scanners) {
    scanner.close();
  }
  assertTrue(chunkPool.getPoolSize() > 0);

  // Second round: scanners closed before clearSnapshot — chunks pool normally.
  chunkPool.clearChunks();
  snapshot = memstore.snapshot();
  memstore.add(new KeyValue(row, fam, qf6, val));
  memstore.add(new KeyValue(row, fam, qf7, val));
  scanners = memstore.getScanners(0);
  for (KeyValueScanner scanner : scanners) {
    scanner.close();
  }
  memstore.clearSnapshot(snapshot.getId());
  assertTrue(chunkPool.getPoolSize() > 0);
}

Class: org.apache.hadoop.hbase.regionserver.TestMemStoreLAB

InternalCallVerifier NullVerifier 
/** Requests of 2MB and above bypass the LAB entirely (returns null). */
@Test
public void testLABLargeAllocation() {
  final MemStoreLAB mslab = new HeapMemStoreLAB();
  final ByteRange alloc = mslab.allocateBytes(2 * 1024 * 1024);
  assertNull("2MB allocation shouldn't be satisfied by LAB.", alloc);
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test a bunch of random allocations: within any one chunk, offsets must be
 * contiguous and no allocation may overrun the chunk's backing array.
 */
@Test
public void testLABRandomAllocation() {
  final Random rng = new Random();
  final MemStoreLAB mslab = new HeapMemStoreLAB();
  int nextOffset = 0;
  byte[] currentChunk = null;
  for (int iter = 0; iter < 100000; iter++) {
    final int size = rng.nextInt(1000);
    final ByteRange alloc = mslab.allocateBytes(size);
    if (alloc.getBytes() != currentChunk) {
      // New backing chunk; allocations restart at offset zero.
      nextOffset = 0;
      currentChunk = alloc.getBytes();
    }
    assertEquals(nextOffset, alloc.getOffset());
    assertTrue("Allocation overruns buffer",
        alloc.getOffset() + size <= alloc.getBytes().length);
    nextOffset += size;
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestMetricsRegionServer

InternalCallVerifier NullVerifier 
// NOTE(review): method name carries a typo ("Constuctor"); kept unchanged
// since the test name is part of the reported surface.
@Test
public void testConstuctor() {
  assertNotNull("There should be a hadoop1/hadoop2 metrics source",
      rsm.getMetricsSource());
  assertNotNull("The RegionServerMetricsWrapper should be accessable",
      rsm.getRegionServerWrapper());
}

Class: org.apache.hadoop.hbase.regionserver.TestMetricsRegionServerSourceImpl

InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier 
/** The source factory is a singleton producing the expected impl type. */
@Test
public void testGetInstance() throws Exception {
  final MetricsRegionServerSourceFactory factory =
      CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
  final MetricsRegionServerSource source = factory.createServer(null);
  assertTrue(source instanceof MetricsRegionServerSourceImpl);
  // A second lookup must hand back the very same factory instance.
  assertSame(factory,
      CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class));
}

Class: org.apache.hadoop.hbase.regionserver.TestMetricsRegionSourceImpl

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** compareTo/hashCode/equals contracts across region metric sources. */
@Test
public void testCompareToHashCodeEquals() throws Exception {
  final MetricsRegionServerSourceFactory factory =
      CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
  final MetricsRegionSource one = factory.createRegion(new RegionWrapperStub("TEST"));
  final MetricsRegionSource oneClone = factory.createRegion(new RegionWrapperStub("TEST"));
  final MetricsRegionSource two = factory.createRegion(new RegionWrapperStub("TWO"));

  // Same region name: compare equal and hash alike.
  assertEquals(0, one.compareTo(oneClone));
  assertEquals(one.hashCode(), oneClone.hashCode());

  // Different region names: unequal, nonzero ordering in both directions,
  // and the two directions disagree (antisymmetry); self-compare is zero.
  assertNotEquals(one, two);
  assertTrue(one.compareTo(two) != 0);
  assertTrue(two.compareTo(one) != 0);
  assertTrue(two.compareTo(one) != one.compareTo(two));
  assertTrue(two.compareTo(two) == 0);
}

Class: org.apache.hadoop.hbase.regionserver.TestMinVersions

InternalCallVerifier BooleanVerifier 
// Verifies minimum-versions semantics (minVersions=2, TTL=1000, maxVersions=1
// per the descriptor args): four puts at ts-3..ts land on the same cell;
// time-ranged gets must see the expected version before and after flush and
// major compaction, and versions older than what minVersions retains must
// disappear once the store is flushed/compacted (empty results asserted).
// The exact put/get/flush/compact ordering IS the behavior under test, so the
// statement sequence is left untouched.
/** * Verify basic minimum versions functionality */ @Test public void testBaseCase() throws Exception { HTableDescriptor htd=hbu.createTableDescriptor(name.getMethodName(),2,1000,1,KeepDeletedCells.FALSE); Region region=hbu.createLocalHRegion(htd,null,null); try { long ts=EnvironmentEdgeManager.currentTime() - 2000; Put p=new Put(T1,ts - 3); p.addColumn(c0,c0,T1); region.put(p); p=new Put(T1,ts - 2); p.addColumn(c0,c0,T2); region.put(p); p=new Put(T1,ts - 1); p.addColumn(c0,c0,T3); region.put(p); p=new Put(T1,ts); p.addColumn(c0,c0,T4); region.put(p); Result r=region.get(new Get(T1)); checkResult(r,c0,T4); Get g=new Get(T1); g.setTimeRange(0L,ts + 1); r=region.get(g); checkResult(r,c0,T4); g.setTimeRange(0L,ts - 2); r=region.get(g); checkResult(r,c0,T1); g=new Get(T1); g.setMaxVersions(); r=region.get(g); checkResult(r,c0,T4,T3); g=new Get(T1); g.setMaxVersions(); g.addColumn(c0,c0); r=region.get(g); checkResult(r,c0,T4,T3); region.flush(true); g=new Get(T1); g.setTimeRange(0L,ts - 2); r=region.get(g); assertTrue(r.isEmpty()); region.compact(true); g=new Get(T1); g.setTimeRange(0L,ts + 1); r=region.get(g); checkResult(r,c0,T4); g.setTimeRange(0L,ts); r=region.get(g); checkResult(r,c0,T3); g.setTimeRange(0L,ts - 1); r=region.get(g); assertTrue(r.isEmpty()); } finally { HBaseTestingUtility.closeRegionAndWAL(region); } }

Class: org.apache.hadoop.hbase.regionserver.TestMiniBatchOperationInProgress

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testMiniBatchOperationInProgressMethods(){ Pair[] operations=new Pair[10]; OperationStatus[] retCodeDetails=new OperationStatus[10]; WALEdit[] walEditsFromCoprocessors=new WALEdit[10]; for (int i=0; i < 10; i++) { operations[i]=new Pair(new Put(Bytes.toBytes(i)),null); } MiniBatchOperationInProgress> miniBatch=new MiniBatchOperationInProgress>(operations,retCodeDetails,walEditsFromCoprocessors,0,5); assertEquals(5,miniBatch.size()); assertTrue(Bytes.equals(Bytes.toBytes(0),miniBatch.getOperation(0).getFirst().getRow())); assertTrue(Bytes.equals(Bytes.toBytes(2),miniBatch.getOperation(2).getFirst().getRow())); assertTrue(Bytes.equals(Bytes.toBytes(4),miniBatch.getOperation(4).getFirst().getRow())); try { miniBatch.getOperation(5); fail("Should throw Exception while accessing out of range"); } catch ( ArrayIndexOutOfBoundsException e) { } miniBatch.setOperationStatus(1,OperationStatus.FAILURE); assertEquals(OperationStatus.FAILURE,retCodeDetails[1]); try { miniBatch.setOperationStatus(6,OperationStatus.FAILURE); fail("Should throw Exception while accessing out of range"); } catch ( ArrayIndexOutOfBoundsException e) { } try { miniBatch.setWalEdit(5,new WALEdit()); fail("Should throw Exception while accessing out of range"); } catch ( ArrayIndexOutOfBoundsException e) { } miniBatch=new MiniBatchOperationInProgress>(operations,retCodeDetails,walEditsFromCoprocessors,7,10); try { miniBatch.setWalEdit(-1,new WALEdit()); fail("Should throw Exception while accessing out of range"); } catch ( ArrayIndexOutOfBoundsException e) { } try { miniBatch.getOperation(-1); fail("Should throw Exception while accessing out of range"); } catch ( ArrayIndexOutOfBoundsException e) { } try { miniBatch.getOperation(3); fail("Should throw Exception while accessing out of range"); } catch ( ArrayIndexOutOfBoundsException e) { } try { miniBatch.getOperationStatus(9); fail("Should throw Exception while accessing out of range"); } catch ( ArrayIndexOutOfBoundsException e) { } 
try { miniBatch.setOperationStatus(3,OperationStatus.FAILURE); fail("Should throw Exception while accessing out of range"); } catch ( ArrayIndexOutOfBoundsException e) { } assertTrue(Bytes.equals(Bytes.toBytes(7),miniBatch.getOperation(0).getFirst().getRow())); assertTrue(Bytes.equals(Bytes.toBytes(9),miniBatch.getOperation(2).getFirst().getRow())); miniBatch.setOperationStatus(1,OperationStatus.SUCCESS); assertEquals(OperationStatus.SUCCESS,retCodeDetails[8]); WALEdit wal=new WALEdit(); miniBatch.setWalEdit(0,wal); assertEquals(wal,walEditsFromCoprocessors[7]); }

Class: org.apache.hadoop.hbase.regionserver.TestMobStoreCompaction

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Major compaction of a MOB store after a family delete: the DeleteFamily
 * marker and the deleted row's mob data must be purged, while the remaining
 * rows' mob cells survive.
 *
 * NOTE(review): the element type of {@code results} was lost in extraction
 * (raw {@code List}); restored as {@code List<Cell>}, which is what
 * InternalScanner.next() fills.
 */
@Test
public void testMajorCompactionAfterDelete() throws Exception {
  init(UTIL.getConfiguration(), 100);
  byte[] dummyData = makeDummyData(200); // larger than the MOB threshold
  Table loader = new RegionAsTable(region);
  int numHfiles = compactionThreshold - 1;
  byte[] deleteRow = Bytes.add(STARTROW, Bytes.toBytes(0));

  // Create (threshold - 1) mob-backed store files, one row each.
  for (int i = 0; i < numHfiles; i++) {
    Put p = createPut(i, dummyData);
    loader.put(p);
    region.flush(true);
  }
  assertEquals("Before compaction: store files", numHfiles, countStoreFiles());
  assertEquals("Before compaction: mob file count", numHfiles, countMobFiles());
  assertEquals("Before compaction: rows", numHfiles, countRows());
  assertEquals("Before compaction: mob rows", numHfiles, countMobRows());
  assertEquals("Before compaction: number of mob cells", numHfiles,
      countMobCellsInMetadata());

  // Delete the family on one row and flush the marker into its own file.
  Delete delete = new Delete(deleteRow);
  delete.addFamily(COLUMN_FAMILY);
  region.delete(delete);
  region.flush(true);
  assertEquals("Before compaction: store files", numHfiles + 1, countStoreFiles());
  assertEquals("Before compaction: mob files", numHfiles, countMobFiles());

  region.compact(true);
  assertEquals("After compaction: store files", 1, countStoreFiles());
  assertEquals("After compaction: mob files", numHfiles + 1, countMobFiles());

  // Raw-scan everything: no DeleteFamily marker may survive major compaction.
  Scan scan = new Scan();
  scan.setRaw(true);
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<>();
  scanner.next(results);
  int deleteCount = 0;
  while (!results.isEmpty()) {
    for (Cell c : results) {
      if (c.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()) {
        deleteCount++;
        assertTrue(Bytes.equals(CellUtil.cloneRow(c), deleteRow));
      }
    }
    results.clear();
    scanner.next(results);
  }
  assertEquals(0, deleteCount);
  scanner.close();

  assertEquals("The cells in mob files", numHfiles - 1, countMobCellsInMobFiles(1));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This test will first generate store files, then bulk load them and trigger
 * the compaction. When compacting, the cell value will be larger than the
 * threshold, so compaction must move the values into a mob file.
 *
 * NOTE(review): the generic arguments of {@code hfiles} were garbled in
 * extraction ("List&gt;"); restored as List&lt;Pair&lt;byte[], String&gt;&gt;
 * (family -&gt; hfile path), which is what Region.bulkLoadHFiles takes.
 */
@Test
public void testMobCompactionWithBulkload() throws Exception {
  // MOB threshold 300 with 600-byte values: every cell qualifies as MOB.
  init(UTIL.getConfiguration(), 300);
  byte[] dummyData = makeDummyData(600);
  Path hbaseRootDir = FSUtils.getRootDir(conf);
  Path basedir = new Path(hbaseRootDir, htd.getNameAsString());
  List<Pair<byte[], String>> hfiles = new ArrayList<>(1);
  for (int i = 0; i < compactionThreshold; i++) {
    Path hpath = new Path(basedir, "hfile" + i);
    hfiles.add(Pair.newPair(COLUMN_FAMILY, hpath.toString()));
    createHFile(hpath, i, dummyData);
  }

  boolean result = region.bulkLoadHFiles(hfiles, true, null);
  assertTrue("Bulkload result:", result);
  assertEquals("Before compaction: store files", compactionThreshold, countStoreFiles());
  assertEquals("Before compaction: mob file count", 0, countMobFiles());
  assertEquals("Before compaction: rows", compactionThreshold, countRows());
  assertEquals("Before compaction: mob rows", 0, countMobRows());
  assertEquals("Before compaction: referenced mob file count", 0, countReferencedMobFiles());

  region.compactStores();
  assertEquals("After compaction: store files", 1, countStoreFiles());
  assertEquals("After compaction: mob file count:", 1, countMobFiles());
  assertEquals("After compaction: rows", compactionThreshold, countRows());
  assertEquals("After compaction: mob rows", compactionThreshold, countMobRows());
  assertEquals("After compaction: referenced mob file count", 1, countReferencedMobFiles());
  assertEquals("After compaction: number of mob cells", compactionThreshold,
      countMobCellsInMetadata());
}

Class: org.apache.hadoop.hbase.regionserver.TestMobStoreScanner

InternalCallVerifier EqualityVerifier 
/**
 * A scanner's read point is pinned when the scanner is opened: same-timestamp
 * overwrites issued after opening must remain invisible to it, even across a
 * flush.
 *
 * FIX: the ResultScanner was never closed in the original; wrapped in
 * try/finally so the scanner (and its server-side resources) are released.
 */
@Test
public void testReadPt() throws Exception {
  TableName tn = TableName.valueOf("testReadPt");
  setUp(0L, tn);
  long ts = System.currentTimeMillis();
  byte[] value1 = Bytes.toBytes("value1");
  Put put1 = new Put(row1);
  put1.addColumn(family, qf1, ts, value1);
  table.put(put1);
  Put put2 = new Put(row2);
  byte[] value2 = Bytes.toBytes("value2");
  put2.addColumn(family, qf1, ts, value2);
  table.put(put2);

  Scan scan = new Scan();
  scan.setCaching(1); // one row per RPC so the second row is fetched post-flush
  ResultScanner rs = table.getScanner(scan);
  try {
    // Overwrite both rows at the SAME timestamp after the scanner is open.
    Put put3 = new Put(row1);
    byte[] value3 = Bytes.toBytes("value3");
    put3.addColumn(family, qf1, ts, value3);
    table.put(put3);
    Put put4 = new Put(row2);
    byte[] value4 = Bytes.toBytes("value4");
    put4.addColumn(family, qf1, ts, value4);
    table.put(put4);

    // The scanner still sees the pre-open values.
    Result result = rs.next();
    Cell cell = result.getColumnLatestCell(family, qf1);
    Assert.assertArrayEquals(value1, CellUtil.cloneValue(cell));
    admin.flush(tn);
    result = rs.next();
    cell = result.getColumnLatestCell(family, qf1);
    Assert.assertArrayEquals(value2, CellUtil.cloneValue(cell));
  } finally {
    rs.close();
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * With EMPTY_VALUE_ON_MOBCELL_MISS set, a read whose mob file is corrupt
 * returns an empty value instead of failing.
 */
@Test
public void testReadFromCorruptMobFilesWithReadEmptyValueOnMobCellMiss() throws Exception {
  final TableName tn =
      TableName.valueOf("testReadFromCorruptMobFilesWithReadEmptyValueOnMobCellMiss");
  setUp(0, tn);
  createRecordAndCorruptMobFile(tn, row1, family, qf1, Bytes.toBytes("value1"));
  final Get get = new Get(row1);
  get.setAttribute(MobConstants.EMPTY_VALUE_ON_MOBCELL_MISS, Bytes.toBytes(true));
  final Result result = table.get(get);
  final Cell cell = result.getColumnLatestCell(family, qf1);
  Assert.assertEquals(0, cell.getValueLength());
}

Class: org.apache.hadoop.hbase.regionserver.TestMultiVersionConcurrencyControlBasic

InternalCallVerifier EqualityVerifier 
/** Basic MVCC: each begin/complete cycle advances the read/write point by one. */
@Test
public void testSimpleMvccOps() {
  final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  final long initialReadPoint = mvcc.getReadPoint();

  // begin + completeAndWait advances the read point.
  MultiVersionConcurrencyControl.WriteEntry entry = mvcc.begin();
  mvcc.completeAndWait(entry);
  assertEquals(initialReadPoint + 1, mvcc.getReadPoint());

  // begin + complete advances the write point again.
  entry = mvcc.begin();
  mvcc.complete(entry);
  assertEquals(initialReadPoint + 2, mvcc.getWritePoint());
}

Class: org.apache.hadoop.hbase.regionserver.TestPerColumnFamilyFlush

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// With FlushAllStoresPolicy (selective flush disabled), a flush must empty ALL
// three column families regardless of their relative memstore sizes: after
// region.flush(false) each CF's memstore is back to DEEP_OVERHEAD, the region
// total is zero, and the WAL reports NO_SEQNUM for the region. The uneven put
// distribution (1200/100/50 puts to CF1/CF2/CF3) exists to prove size does not
// matter under this policy; the ordering of puts and size reads is load-bearing.
@Test(timeout=180000) public void testSelectiveFlushWhenNotEnabled() throws IOException { Configuration conf=HBaseConfiguration.create(); conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,200 * 1024); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,FlushAllStoresPolicy.class.getName()); HRegion region=initHRegion("testSelectiveFlushWhenNotEnabled",conf); for (int i=1; i <= 1200; i++) { region.put(createPut(1,i)); if (i <= 100) { region.put(createPut(2,i)); if (i <= 50) { region.put(createPut(3,i)); } } } long totalMemstoreSize=region.getMemstoreSize(); long cf1MemstoreSize=region.getStore(FAMILY1).getMemStoreSize(); long cf2MemstoreSize=region.getStore(FAMILY2).getMemStoreSize(); long cf3MemstoreSize=region.getStore(FAMILY3).getMemStoreSize(); assertTrue(cf1MemstoreSize > 0); assertTrue(cf2MemstoreSize > 0); assertTrue(cf3MemstoreSize > 0); assertEquals(totalMemstoreSize + 3 * DefaultMemStore.DEEP_OVERHEAD,cf1MemstoreSize + cf2MemstoreSize + cf3MemstoreSize); region.flush(false); cf1MemstoreSize=region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize=region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize=region.getStore(FAMILY3).getMemStoreSize(); totalMemstoreSize=region.getMemstoreSize(); long smallestSeqInRegionCurrentMemstore=region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); assertEquals(DefaultMemStore.DEEP_OVERHEAD,cf1MemstoreSize); assertEquals(DefaultMemStore.DEEP_OVERHEAD,cf2MemstoreSize); assertEquals(DefaultMemStore.DEEP_OVERHEAD,cf3MemstoreSize); assertEquals(0,totalMemstoreSize); assertEquals(HConstants.NO_SEQNUM,smallestSeqInRegionCurrentMemstore); HBaseTestingUtility.closeRegionAndWAL(region); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Mini-cluster test: rolls the WAL maxLogs times while writing mostly to CF1,
// then verifies that when the "too many WALs" condition forces a flush, the
// flush is a FULL flush (all three CF memstores drop to DEEP_OVERHEAD and a
// subsequent rollWriter(true) returns null, meaning no regions still pin old
// WALs). The write/roll/wait sequencing and cluster lifecycle are the test
// itself, so the statements are left untouched.
// NOTE(review): "Pair desiredRegionAndServer" and "Waiter.ExplainingPredicate(){"
// appear to have lost generic parameters (likely Pair<Region, HRegionServer>
// and ExplainingPredicate<Exception>) during extraction — confirm against the
// original test source.
/** * When a log roll is about to happen, we do a flush of the regions who will be affected by the * log roll. These flushes cannot be a selective flushes, otherwise we cannot roll the logs. This * test ensures that we do a full-flush in that scenario. * @throws IOException */ @Test(timeout=180000) public void testFlushingWhenLogRolling() throws Exception { TableName tableName=TableName.valueOf("testFlushingWhenLogRolling"); Configuration conf=TEST_UTIL.getConfiguration(); conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,128 * 1024 * 1024); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,FlushLargeStoresPolicy.class.getName()); long cfFlushSizeLowerBound=2048; conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,cfFlushSizeLowerBound); conf.setLong("hbase.regionserver.logroll.period",60L * 60 * 1000); conf.setLong("hbase.regionserver.hlog.blocksize",128L * 1024 * 1024); final int maxLogs=10; conf.setInt("hbase.regionserver.maxlogs",maxLogs); final int numRegionServers=1; TEST_UTIL.startMiniCluster(numRegionServers); try { Table table=TEST_UTIL.createTable(tableName,FAMILIES); try (Admin admin=TEST_UTIL.getConnection().getAdmin()){ admin.flush(TableName.NAMESPACE_TABLE_NAME); } Pair desiredRegionAndServer=getRegionWithName(tableName); final Region desiredRegion=desiredRegionAndServer.getFirst(); assertTrue("Could not find a region which hosts the new region.",desiredRegion != null); LOG.info("Writing to region=" + desiredRegion); for (int i=1; i <= 3; i++) { table.put(createPut(i,0)); } for (int i=0; i < maxLogs; i++) { for (int j=0; j < 100; j++) { table.put(createPut(1,i * 100 + j)); } int currentNumRolledLogFiles=getNumRolledLogFiles(desiredRegion); assertNull(getWAL(desiredRegion).rollWriter()); while (getNumRolledLogFiles(desiredRegion) <= currentNumRolledLogFiles) { Thread.sleep(100); } } table.close(); assertEquals(maxLogs,getNumRolledLogFiles(desiredRegion)); assertTrue(desiredRegion.getStore(FAMILY1).getMemStoreSize() > 
cfFlushSizeLowerBound); assertTrue(desiredRegion.getStore(FAMILY2).getMemStoreSize() < cfFlushSizeLowerBound); assertTrue(desiredRegion.getStore(FAMILY3).getMemStoreSize() < cfFlushSizeLowerBound); table.put(createPut(1,12345678)); desiredRegionAndServer.getSecond().walRoller.requestRollAll(); TEST_UTIL.waitFor(30000,new Waiter.ExplainingPredicate(){ @Override public boolean evaluate() throws Exception { return desiredRegion.getMemstoreSize() == 0; } @Override public String explainFailure() throws Exception { long memstoreSize=desiredRegion.getMemstoreSize(); if (memstoreSize > 0) { return "Still have unflushed entries in memstore, memstore size is " + memstoreSize; } return "Unknown"; } } ); LOG.info("Finished waiting on flush after too many WALs..."); assertEquals(DefaultMemStore.DEEP_OVERHEAD,desiredRegion.getStore(FAMILY1).getMemStoreSize()); assertEquals(DefaultMemStore.DEEP_OVERHEAD,desiredRegion.getStore(FAMILY2).getMemStoreSize()); assertEquals(DefaultMemStore.DEEP_OVERHEAD,desiredRegion.getStore(FAMILY3).getMemStoreSize()); assertNull(getWAL(desiredRegion).rollWriter(true)); assertTrue(getNumRolledLogFiles(desiredRegion) < maxLogs); } finally { TEST_UTIL.shutdownMiniCluster(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// With FlushLargeStoresPolicy (selective flush enabled, lower bound 100KB),
// region.flush(false) must flush ONLY the column families above the bound:
// first round flushes CF1 only (CF2/CF3 sizes unchanged, WAL earliest seqnum
// advances to CF2's oldest edit); second round flushes CF1+CF2, leaving CF3;
// a forced flush(true) then a final flush(false) over balanced puts empties
// everything. The put distribution and the order of size/seqnum reads encode
// the expected behavior, so the statements are left untouched.
@Test(timeout=180000) public void testSelectiveFlushWhenEnabled() throws IOException { Configuration conf=HBaseConfiguration.create(); conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,200 * 1024); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,FlushLargeStoresPolicy.class.getName()); conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,100 * 1024); Region region=initHRegion("testSelectiveFlushWhenEnabled",conf); for (int i=1; i <= 1200; i++) { region.put(createPut(1,i)); if (i <= 100) { region.put(createPut(2,i)); if (i <= 50) { region.put(createPut(3,i)); } } } long totalMemstoreSize=region.getMemstoreSize(); long smallestSeqCF1=region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2=region.getOldestSeqIdOfStore(FAMILY2); long smallestSeqCF3=region.getOldestSeqIdOfStore(FAMILY3); long cf1MemstoreSize=region.getStore(FAMILY1).getMemStoreSize(); long cf2MemstoreSize=region.getStore(FAMILY2).getMemStoreSize(); long cf3MemstoreSize=region.getStore(FAMILY3).getMemStoreSize(); long smallestSeqInRegionCurrentMemstore=getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); assertEquals(smallestSeqCF1,smallestSeqInRegionCurrentMemstore); assertTrue(smallestSeqCF1 < smallestSeqCF2); assertTrue(smallestSeqCF2 < smallestSeqCF3); assertTrue(cf1MemstoreSize > 0); assertTrue(cf2MemstoreSize > 0); assertTrue(cf3MemstoreSize > 0); assertEquals(totalMemstoreSize + 3 * DefaultMemStore.DEEP_OVERHEAD,cf1MemstoreSize + cf2MemstoreSize + cf3MemstoreSize); region.flush(false); long oldCF2MemstoreSize=cf2MemstoreSize; long oldCF3MemstoreSize=cf3MemstoreSize; cf1MemstoreSize=region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize=region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize=region.getStore(FAMILY3).getMemStoreSize(); totalMemstoreSize=region.getMemstoreSize(); smallestSeqInRegionCurrentMemstore=getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); 
assertEquals(DefaultMemStore.DEEP_OVERHEAD,cf1MemstoreSize); assertEquals(cf2MemstoreSize,oldCF2MemstoreSize); assertEquals(cf3MemstoreSize,oldCF3MemstoreSize); assertEquals(smallestSeqInRegionCurrentMemstore,smallestSeqCF2); assertEquals(totalMemstoreSize + 2 * DefaultMemStore.DEEP_OVERHEAD,cf2MemstoreSize + cf3MemstoreSize); for (int i=1200; i < 2400; i++) { region.put(createPut(2,i)); if (i - 1200 < 100) { region.put(createPut(3,i)); } } oldCF3MemstoreSize=region.getStore(FAMILY3).getMemStoreSize(); region.flush(false); cf1MemstoreSize=region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize=region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize=region.getStore(FAMILY3).getMemStoreSize(); totalMemstoreSize=region.getMemstoreSize(); smallestSeqInRegionCurrentMemstore=getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); assertEquals(DefaultMemStore.DEEP_OVERHEAD,cf1MemstoreSize); assertEquals(DefaultMemStore.DEEP_OVERHEAD,cf2MemstoreSize); assertEquals(cf3MemstoreSize,oldCF3MemstoreSize); assertEquals(totalMemstoreSize + DefaultMemStore.DEEP_OVERHEAD,cf3MemstoreSize); assertEquals(smallestSeqInRegionCurrentMemstore,smallestSeqCF3); region.flush(true); for (int i=1; i <= 300; i++) { region.put(createPut(1,i)); region.put(createPut(2,i)); region.put(createPut(3,i)); region.put(createPut(4,i)); region.put(createPut(5,i)); } region.flush(false); assertEquals(0,region.getMemstoreSize()); HBaseTestingUtility.closeRegionAndWAL(region); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// End-to-end comparison: runs the same doPut workload against two mini
// clusters — one with FlushAllStoresPolicy, one with FlushLargeStoresPolicy
// (lower bound 0) — and asserts that selective flushing produces strictly
// fewer store files for CF1 and CF2. Each cluster is fully started, exercised,
// and shut down inside its own try/finally; that lifecycle ordering is
// essential, so the statements are left untouched.
@Test(timeout=180000) public void testCompareStoreFileCount() throws Exception { long memstoreFlushSize=1024L * 1024; Configuration conf=TEST_UTIL.getConfiguration(); conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,memstoreFlushSize); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,FlushAllStoresPolicy.class.getName()); conf.setInt(HStore.BLOCKING_STOREFILES_KEY,10000); conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,ConstantSizeRegionSplitPolicy.class.getName()); HTableDescriptor htd=new HTableDescriptor(TABLENAME); htd.setCompactionEnabled(false); htd.addFamily(new HColumnDescriptor(FAMILY1)); htd.addFamily(new HColumnDescriptor(FAMILY2)); htd.addFamily(new HColumnDescriptor(FAMILY3)); LOG.info("==============Test with selective flush disabled==============="); int cf1StoreFileCount=-1; int cf2StoreFileCount=-1; int cf3StoreFileCount=-1; int cf1StoreFileCount1=-1; int cf2StoreFileCount1=-1; int cf3StoreFileCount1=-1; try { TEST_UTIL.startMiniCluster(1); TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); TEST_UTIL.getHBaseAdmin().createTable(htd); TEST_UTIL.waitTableAvailable(TABLENAME); Connection conn=ConnectionFactory.createConnection(conf); Table table=conn.getTable(TABLENAME); doPut(table,memstoreFlushSize); table.close(); conn.close(); Region region=getRegionWithName(TABLENAME).getFirst(); cf1StoreFileCount=region.getStore(FAMILY1).getStorefilesCount(); cf2StoreFileCount=region.getStore(FAMILY2).getStorefilesCount(); cf3StoreFileCount=region.getStore(FAMILY3).getStorefilesCount(); } finally { TEST_UTIL.shutdownMiniCluster(); } LOG.info("==============Test with selective flush enabled==============="); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,FlushLargeStoresPolicy.class.getName()); conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,0); try { TEST_UTIL.startMiniCluster(1); 
TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); TEST_UTIL.getHBaseAdmin().createTable(htd); Connection conn=ConnectionFactory.createConnection(conf); Table table=conn.getTable(TABLENAME); doPut(table,memstoreFlushSize); table.close(); conn.close(); Region region=getRegionWithName(TABLENAME).getFirst(); cf1StoreFileCount1=region.getStore(FAMILY1).getStorefilesCount(); cf2StoreFileCount1=region.getStore(FAMILY2).getStorefilesCount(); cf3StoreFileCount1=region.getStore(FAMILY3).getStorefilesCount(); } finally { TEST_UTIL.shutdownMiniCluster(); } LOG.info("disable selective flush: " + Bytes.toString(FAMILY1) + "=>"+ cf1StoreFileCount+ ", "+ Bytes.toString(FAMILY2)+ "=>"+ cf2StoreFileCount+ ", "+ Bytes.toString(FAMILY3)+ "=>"+ cf3StoreFileCount); LOG.info("enable selective flush: " + Bytes.toString(FAMILY1) + "=>"+ cf1StoreFileCount1+ ", "+ Bytes.toString(FAMILY2)+ "=>"+ cf2StoreFileCount1+ ", "+ Bytes.toString(FAMILY3)+ "=>"+ cf3StoreFileCount1); assertTrue(cf1StoreFileCount1 < cf1StoreFileCount); assertTrue(cf2StoreFileCount1 < cf2StoreFileCount); }

Class: org.apache.hadoop.hbase.regionserver.TestPriorityRpc

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/** An RPC whose method name is unknown is assigned NORMAL_QOS. */
@Test
public void testQosFunctionWithoutKnownArgument() throws IOException {
  final RequestHeader header = RequestHeader.newBuilder().setMethodName("foo").build();
  final PriorityFunction qosFunc = regionServer.rpcServices.getPriority();
  assertEquals(HConstants.NORMAL_QOS, qosFunc.getPriority(header, null,
      User.createUserForTesting(regionServer.conf, "someuser", new String[]{"somegroup"})));
}

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/** Requests from superusers — named directly or via group — get ADMIN_QOS. */
@Test
public void testQosFunctionForRequestCalledBySuperUser() throws Exception {
  final RequestHeader header = RequestHeader.newBuilder().setMethodName("foo").build();
  final PriorityFunction qosFunc = regionServer.rpcServices.getPriority();

  // Superuser named directly.
  regionServer.conf.set(Superusers.SUPERUSER_CONF_KEY, "samplesuperuser");
  Superusers.initialize(regionServer.conf);
  assertEquals(HConstants.ADMIN_QOS, qosFunc.getPriority(header, null,
      User.createUserForTesting(regionServer.conf, "samplesuperuser",
          new String[]{"somegroup"})));

  // Superuser via group membership (the '@' prefix marks a group).
  regionServer.conf.set(Superusers.SUPERUSER_CONF_KEY, "@samplesupergroup");
  Superusers.initialize(regionServer.conf);
  assertEquals(HConstants.ADMIN_QOS, qosFunc.getPriority(header, null,
      User.createUserForTesting(regionServer.conf, "regularuser",
          new String[]{"samplesupergroup"})));
}

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * A Get targeting the meta region — with the region server mocked so the region
 * resolves to a system table — should be prioritized as
 * {@code HConstants.SYSTEMTABLE_QOS}.
 */
@Test
public void testQosFunctionForMeta() throws IOException {
  priority = regionServer.rpcServices.getPriority();
  RequestHeader.Builder headerBuilder = RequestHeader.newBuilder();
  headerBuilder.setMethodName("foo");
  // Target the first meta region by name so the priority function resolves it.
  GetRequest.Builder getRequestBuilder = GetRequest.newBuilder();
  RegionSpecifier.Builder regionSpecifierBuilder = RegionSpecifier.newBuilder();
  regionSpecifierBuilder.setType(RegionSpecifierType.REGION_NAME);
  ByteString name = ByteStringer.wrap(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
  regionSpecifierBuilder.setValue(name);
  RegionSpecifier regionSpecifier = regionSpecifierBuilder.build();
  getRequestBuilder.setRegion(regionSpecifier);
  Get.Builder getBuilder = Get.newBuilder();
  getBuilder.setRow(ByteStringer.wrap("somerow".getBytes()));
  getRequestBuilder.setGet(getBuilder.build());
  GetRequest getRequest = getRequestBuilder.build();
  RequestHeader header = headerBuilder.build();

  // Mock a region server whose region resolution reports a system table.
  HRegion mockRegion = Mockito.mock(HRegion.class);
  HRegionServer mockRS = Mockito.mock(HRegionServer.class);
  RSRpcServices mockRpc = Mockito.mock(RSRpcServices.class);
  Mockito.when(mockRS.getRSRpcServices()).thenReturn(mockRpc);
  HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
  Mockito.when(mockRpc.getRegion((RegionSpecifier) Mockito.any())).thenReturn(mockRegion);
  Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
  Mockito.when(mockRegionInfo.isSystemTable()).thenReturn(true);
  ((AnnotationReadingPriorityFunction) priority).setRegionServer(mockRS);

  assertEquals(HConstants.SYSTEMTABLE_QOS, priority.getPriority(header, getRequest,
      User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" })));
}

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Scan priority is resolved from the region being scanned: a fresh scan request on a
 * non-system region gets NORMAL_QOS, while a continuation scan whose scanner id
 * resolves to a system-table region gets SYSTEMTABLE_QOS (and drops back to
 * NORMAL_QOS when the region stops being a system table).
 */
@Test
public void testQosFunctionForScanMethod() throws IOException {
  RequestHeader.Builder headerBuilder = RequestHeader.newBuilder();
  headerBuilder.setMethodName("Scan");
  RequestHeader header = headerBuilder.build();

  // Empty scan request (no region, no scanner id) against a non-system region.
  ScanRequest.Builder scanBuilder = ScanRequest.newBuilder();
  ScanRequest scanRequest = scanBuilder.build();
  HRegion mockRegion = Mockito.mock(HRegion.class);
  HRegionServer mockRS = Mockito.mock(HRegionServer.class);
  RSRpcServices mockRpc = Mockito.mock(RSRpcServices.class);
  Mockito.when(mockRS.getRSRpcServices()).thenReturn(mockRpc);
  HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
  Mockito.when(mockRpc.getRegion((RegionSpecifier) Mockito.any())).thenReturn(mockRegion);
  Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
  Mockito.when(mockRegionInfo.isSystemTable()).thenReturn(false);
  ((AnnotationReadingPriorityFunction) priority).setRegionServer(mockRS);
  int qos = priority.getPriority(header, scanRequest,
      User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" }));
  assertTrue("" + qos, qos == HConstants.NORMAL_QOS);

  // Continuation scan: the scanner id resolves to a region of a system table.
  scanBuilder = ScanRequest.newBuilder();
  scanBuilder.setScannerId(12345);
  scanRequest = scanBuilder.build();
  RegionScanner mockRegionScanner = Mockito.mock(RegionScanner.class);
  Mockito.when(mockRpc.getScanner(12345)).thenReturn(mockRegionScanner);
  Mockito.when(mockRegionScanner.getRegionInfo()).thenReturn(mockRegionInfo);
  Mockito.when(mockRpc.getRegion((RegionSpecifier) Mockito.any())).thenReturn(mockRegion);
  Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
  Mockito.when(mockRegionInfo.isSystemTable()).thenReturn(true);
  ((AnnotationReadingPriorityFunction) priority).setRegionServer(mockRS);
  assertEquals(HConstants.SYSTEMTABLE_QOS, priority.getPriority(header, scanRequest,
      User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" })));

  // Flip back to a non-system region: priority should return to NORMAL_QOS.
  Mockito.when(mockRegionInfo.isSystemTable()).thenReturn(false);
  assertEquals(HConstants.NORMAL_QOS, priority.getPriority(header, scanRequest,
      User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" })));
}

Class: org.apache.hadoop.hbase.regionserver.TestRSKilledWhenInitializing

InternalCallVerifier EqualityVerifier 
/**
 * Test verifies whether a region server is removed from the online servers list in the
 * master if it went down after registering with the master.
 * <p>
 * Starts one master and two region servers (of the {@code MockedRegionServer} flavor)
 * and then polls the master's online-server list until it settles at two entries.
 * @throws Exception on cluster setup/teardown failure
 */
@Test(timeout = 180000)
public void testRSTerminationAfterRegisteringToMasterBeforeCreatingEphemeralNod() throws Exception {
  final int NUM_MASTERS = 1;
  final int NUM_RS = 2;
  firstRS.set(true);
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
  final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniDFSCluster(3);
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.createRootDir();
  final LocalHBaseCluster cluster =
      new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class, MockedRegionServer.class);
  final MasterThread master = cluster.getMasters().get(0);
  master.start();
  try {
    // Wait (max 30s) for the master to initialize before starting region servers.
    long startTime = System.currentTimeMillis();
    while (!master.getMaster().isInitialized()) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ignored) {
        // Restore the interrupt flag so an external interrupt is not silently lost.
        Thread.currentThread().interrupt();
      }
      if (System.currentTimeMillis() > startTime + 30000) {
        throw new RuntimeException("Master not active after 30 seconds");
      }
    }
    masterActive = true;
    cluster.getRegionServers().get(0).start();
    cluster.getRegionServers().get(1).start();
    Thread.sleep(10000);
    // Poll until the master's online list has dropped to at most two servers.
    List<ServerName> onlineServersList =
        master.getMaster().getServerManager().getOnlineServersList();
    while (onlineServersList.size() > 2) {
      Thread.sleep(100);
      onlineServersList = master.getMaster().getServerManager().getOnlineServersList();
    }
    // JUnit's assertEquals takes (expected, actual); the original had the arguments
    // reversed, which produced misleading failure messages.
    assertEquals(2, onlineServersList.size());
    cluster.shutdown();
  } finally {
    masterActive = false;
    firstRS.set(true);
    TEST_UTIL.shutdownMiniCluster();
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestRecoveredEdits

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * HBASE-12782 ITBLL fails for me if generator does anything but 5M per maptask. * Create a region. Close it. Then copy into place a file to replay, one that is bigger than * configured flush size so we bring on lots of flushes. Then reopen and confirm all edits * made it in. * @throws IOException */ @Test(timeout=60000) public void testReplayWorksThoughLotsOfFlushing() throws IOException { Configuration conf=new Configuration(TEST_UTIL.getConfiguration()); conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,1024 * 1024); final String encodedRegionName="4823016d8fca70b25503ee07f4c6d79f"; HTableDescriptor htd=new HTableDescriptor(TableName.valueOf(testName.getMethodName())); final String columnFamily="meta"; byte[][] columnFamilyAsByteArray=new byte[][]{Bytes.toBytes(columnFamily)}; htd.addFamily(new HColumnDescriptor(columnFamily)); HRegionInfo hri=new HRegionInfo(htd.getTableName()){ @Override public synchronized String getEncodedName(){ return encodedRegionName; } private byte[] encodedRegionNameAsBytes=null; @Override public synchronized byte[] getEncodedNameAsBytes(){ if (encodedRegionNameAsBytes == null) { this.encodedRegionNameAsBytes=Bytes.toBytes(getEncodedName()); } return this.encodedRegionNameAsBytes; } } ; Path hbaseRootDir=TEST_UTIL.getDataTestDir(); FileSystem fs=FileSystem.get(TEST_UTIL.getConfiguration()); Path tableDir=FSUtils.getTableDir(hbaseRootDir,htd.getTableName()); HRegionFileSystem hrfs=new HRegionFileSystem(TEST_UTIL.getConfiguration(),fs,tableDir,hri); if (fs.exists(hrfs.getRegionDir())) { LOG.info("Region directory already exists. 
Deleting."); fs.delete(hrfs.getRegionDir(),true); } HRegion region=HRegion.createHRegion(hri,hbaseRootDir,conf,htd,null); assertEquals(encodedRegionName,region.getRegionInfo().getEncodedName()); List storeFiles=region.getStoreFileList(columnFamilyAsByteArray); assertTrue(storeFiles.isEmpty()); region.close(); Path regionDir=region.getRegionDir(hbaseRootDir,hri); Path recoveredEditsDir=WALSplitter.getRegionDirRecoveredEditsDir(regionDir); Path recoveredEditsFile=new Path(System.getProperty("test.build.classes","target/test-classes"),"0000000000000016310"); Path destination=new Path(recoveredEditsDir,recoveredEditsFile.getName()); fs.copyToLocalFile(recoveredEditsFile,destination); assertTrue(fs.exists(destination)); region=HRegion.openHRegion(region,null); assertEquals(encodedRegionName,region.getRegionInfo().getEncodedName()); storeFiles=region.getStoreFileList(columnFamilyAsByteArray); assertTrue("Files count=" + storeFiles.size(),storeFiles.size() > 10); int count=verifyAllEditsMadeItIn(fs,conf,recoveredEditsFile,region); LOG.info("Checked " + count + " edits made it in"); }

Class: org.apache.hadoop.hbase.regionserver.TestRegionIncrement

InternalCallVerifier EqualityVerifier 
/**
 * Run CrossRowCellIncrementer threads against one region, then scan everything back
 * and verify the grand total equals INCREMENT_COUNT * THREAD_COUNT (no lost
 * increments). NOTE(review): the original javadoc ("each thread update its own Cell")
 * appears copy-pasted from the uncontended test — confirm the intended contention mode.
 * @throws IOException on region access failure
 * @throws InterruptedException if interrupted while joining worker threads
 */
@Test
public void testContendedAcrossCellsIncrement() throws IOException, InterruptedException {
  final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
      TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
  long startTime = System.currentTimeMillis();
  try {
    CrossRowCellIncrementer[] workers = new CrossRowCellIncrementer[THREAD_COUNT];
    for (int i = 0; i < workers.length; i++) {
      workers[i] = new CrossRowCellIncrementer(i, INCREMENT_COUNT, region, THREAD_COUNT);
    }
    for (int i = 0; i < workers.length; i++) {
      workers[i].start();
    }
    for (int i = 0; i < workers.length; i++) {
      workers[i].join();
    }
    // Scan the region back and make sure no increment was lost.
    RegionScanner regionScanner = region.getScanner(new Scan());
    List<Cell> cells = new ArrayList<Cell>(100);
    while (regionScanner.next(cells)) {
      continue;
    }
    assertEquals(THREAD_COUNT, cells.size());
    long total = 0;
    for (Cell cell : cells) {
      total += Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
    }
    assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
  } finally {
    closeRegion(region);
    LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Have each thread update its own Cell. Avoid contention with another thread.
 * Each worker increments a distinct row; afterwards the scan must show exactly
 * THREAD_COUNT cells summing to INCREMENT_COUNT * THREAD_COUNT.
 * @throws IOException on region access failure
 * @throws InterruptedException if interrupted while joining worker threads
 */
@Test
public void testUnContendedSingleCellIncrement() throws IOException, InterruptedException {
  final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
      TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
  long startTime = System.currentTimeMillis();
  try {
    SingleCellIncrementer[] workers = new SingleCellIncrementer[THREAD_COUNT];
    for (int i = 0; i < workers.length; i++) {
      // One distinct row per worker thread — no cross-thread contention.
      byte[] rowBytes = Bytes.toBytes(i);
      Increment increment = new Increment(rowBytes);
      increment.addColumn(INCREMENT_BYTES, INCREMENT_BYTES, 1);
      workers[i] = new SingleCellIncrementer(i, INCREMENT_COUNT, region, increment);
    }
    for (int i = 0; i < workers.length; i++) {
      workers[i].start();
    }
    for (int i = 0; i < workers.length; i++) {
      workers[i].join();
    }
    // Scan the region back and make sure no increment was lost.
    RegionScanner regionScanner = region.getScanner(new Scan());
    List<Cell> cells = new ArrayList<Cell>(THREAD_COUNT);
    while (regionScanner.next(cells)) {
      continue;
    }
    assertEquals(THREAD_COUNT, cells.size());
    long total = 0;
    for (Cell cell : cells) {
      total += Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
    }
    assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
  } finally {
    closeRegion(region);
    LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestRegionMergeTransaction

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Merge two adjacent loaded regions and verify: both parents closed, merges working
 * dir emptied, merged region keys span both parents, row counts add up, and the
 * parents' write locks were released.
 */
@Test
public void testWholesomeMerge() throws IOException, InterruptedException {
  final int rowCountOfRegionA = loadRegion(this.region_a, CF, true);
  final int rowCountOfRegionB = loadRegion(this.region_b, CF, true);
  assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0);
  assertEquals(rowCountOfRegionA, countRows(this.region_a));
  assertEquals(rowCountOfRegionB, countRows(this.region_b));

  RegionMergeTransactionImpl mt = prepareOnGoodRegions();

  // Ephemeral port so the embedded server does not collide with others.
  TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
  CoordinatedStateManager cp =
      CoordinatedStateManagerFactory.getCoordinatedStateManager(TEST_UTIL.getConfiguration());
  Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
  HRegion mergedRegion = (HRegion) mt.execute(mockServer, null);

  assertTrue(this.fs.exists(mt.getMergesDir()));
  assertTrue(region_a.isClosed());
  assertTrue(region_b.isClosed());
  // The merges working dir should have been emptied by the transaction.
  assertEquals(0, this.fs.listStatus(mt.getMergesDir()).length);
  // Merged region key range covers both parents end to end.
  assertTrue(Bytes.equals(this.region_a.getRegionInfo().getStartKey(),
      mergedRegion.getRegionInfo().getStartKey()));
  assertTrue(Bytes.equals(this.region_b.getRegionInfo().getEndKey(),
      mergedRegion.getRegionInfo().getEndKey()));
  try {
    int mergedRegionRowCount = countRows(mergedRegion);
    assertEquals((rowCountOfRegionA + rowCountOfRegionB), mergedRegionRowCount);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(mergedRegion);
  }
  // Neither parent's write lock should still be held by this thread.
  assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
  assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Force a failure after the point-of-no-return (the merged-region open throws) and
 * verify the transaction refuses to roll back and the merged region directory
 * remains on disk.
 */
@Test
public void testFailAfterPONR() throws IOException, KeeperException, InterruptedException {
  final int rowCountOfRegionA = loadRegion(this.region_a, CF, true);
  final int rowCountOfRegionB = loadRegion(this.region_b, CF, true);
  assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0);
  assertEquals(rowCountOfRegionA, countRows(this.region_a));
  assertEquals(rowCountOfRegionB, countRows(this.region_b));

  RegionMergeTransactionImpl mt = prepareOnGoodRegions();
  // Fail the step that happens after the point of no return.
  Mockito.doThrow(new MockedFailedMergedRegionOpen()).when(mt).openMergedRegion(
      (Server) Mockito.anyObject(), (RegionServerServices) Mockito.anyObject(),
      (HRegion) Mockito.anyObject());

  boolean expectedException = false;
  TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
  CoordinatedStateManager cp =
      CoordinatedStateManagerFactory.getCoordinatedStateManager(TEST_UTIL.getConfiguration());
  Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
  try {
    mt.execute(mockServer, null);
  } catch (MockedFailedMergedRegionOpen e) {
    expectedException = true;
  }
  assertTrue(expectedException);
  // Past the PONR, rollback must refuse to undo the merge.
  assertFalse(mt.rollback(null, null));
  Path tableDir = this.region_a.getRegionFileSystem().getRegionDir().getParent();
  Path mergedRegionDir = new Path(tableDir, mt.getMergedRegionInfo().getEncodedName());
  assertTrue(TEST_UTIL.getTestFileSystem().exists(mergedRegionDir));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Make the merged-region creation step fail, verify a clean rollback (parents intact,
 * merged region dir gone, locks released), then re-run the same transaction to a
 * successful merge.
 */
@Test
public void testRollback() throws IOException, InterruptedException {
  final int rowCountOfRegionA = loadRegion(this.region_a, CF, true);
  final int rowCountOfRegionB = loadRegion(this.region_b, CF, true);
  assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0);
  assertEquals(rowCountOfRegionA, countRows(this.region_a));
  assertEquals(rowCountOfRegionB, countRows(this.region_b));

  RegionMergeTransactionImpl mt = prepareOnGoodRegions();
  // Blow up while creating the merged region (before the point of no return).
  when(mt.createMergedRegionFromMerges(region_a, region_b, mt.getMergedRegionInfo()))
      .thenThrow(new MockedFailedMergedRegionCreation());

  boolean expectedException = false;
  TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
  CoordinatedStateManager cp =
      CoordinatedStateManagerFactory.getCoordinatedStateManager(TEST_UTIL.getConfiguration());
  Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
  try {
    mt.execute(mockServer, null);
  } catch (MockedFailedMergedRegionCreation e) {
    expectedException = true;
  }
  assertTrue(expectedException);
  assertTrue(mt.rollback(null, null));

  // Parents should be fully intact after rollback.
  int rowCountOfRegionA2 = countRows(this.region_a);
  assertEquals(rowCountOfRegionA, rowCountOfRegionA2);
  int rowCountOfRegionB2 = countRows(this.region_b);
  assertEquals(rowCountOfRegionB, rowCountOfRegionB2);
  assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, mt.getMergedRegionInfo())));
  assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
  assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread());

  // The same transaction should be reusable after rollback.
  assertTrue(mt.prepare(null));
  HRegion mergedRegion = (HRegion) mt.execute(mockServer, null);
  try {
    int mergedRegionRowCount = countRows(mergedRegion);
    assertEquals((rowCountOfRegionA + rowCountOfRegionB), mergedRegionRowCount);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(mergedRegion);
  }
  assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
  assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread());
}

Class: org.apache.hadoop.hbase.regionserver.TestRegionMergeTransactionOnCluster

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Merge regions on a live cluster twice, then check that a parent region in MERGED
 * state can neither be re-assigned nor unassigned and stays in MERGED state.
 */
@Test
public void testWholesomeMerge() throws Exception {
  LOG.info("Starting testWholesomeMerge");
  final TableName tableName = TableName.valueOf("testWholesomeMerge");
  Table table = createTableAndLoadData(master, tableName);
  mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1, INITIAL_REGION_NUM - 1);
  PairOfSameType<HRegionInfo> mergedRegions =
      mergeRegionsAndVerifyRegionNum(master, tableName, 1, 2, INITIAL_REGION_NUM - 2);
  verifyRowCount(table, ROWSIZE);

  // Pick one of the two parent regions at random.
  HRegionInfo hri =
      RandomUtils.nextBoolean() ? mergedRegions.getFirst() : mergedRegions.getSecond();
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  AssignmentManager am = cluster.getMaster().getAssignmentManager();
  RegionStates regionStates = am.getRegionStates();
  // Wait (max 60s) for the chosen parent to reach MERGED state.
  long start = EnvironmentEdgeManager.currentTime();
  while (!regionStates.isRegionInState(hri, State.MERGED)) {
    assertFalse("Timed out in waiting one merged region to be in state MERGED",
        EnvironmentEdgeManager.currentTime() - start > 60000);
    Thread.sleep(500);
  }
  // A merged parent must be immune to both assign and unassign attempts.
  am.assign(hri, true);
  assertFalse("Merged region can't be assigned", regionStates.isRegionInTransition(hri));
  assertTrue(regionStates.isRegionInState(hri, State.MERGED));
  am.unassign(hri, null);
  assertFalse("Merged region can't be unassigned", regionStates.isRegionInTransition(hri));
  assertTrue(regionStates.isRegionInState(hri, State.MERGED));
  table.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Verify that a merged region's parent references (MERGEA/MERGEB qualifiers in meta)
 * and the parent region directories are cleaned up by the catalog janitor once the
 * merged region has been compacted and its compacted files discharged.
 */
@SuppressWarnings("deprecation")
@Test
public void testCleanMergeReference() throws Exception {
  LOG.info("Starting testCleanMergeReference");
  // Disable the janitor so this test controls exactly when cleanup runs.
  admin.enableCatalogJanitor(false);
  try {
    final TableName tableName = TableName.valueOf("testCleanMergeReference");
    Table table = createTableAndLoadData(master, tableName);
    mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1, INITIAL_REGION_NUM - 1);
    verifyRowCount(table, ROWSIZE);
    table.close();

    List<Pair<HRegionInfo, ServerName>> tableRegions =
        MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName);
    HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
    HTableDescriptor tableDescriptor = master.getTableDescriptors().get(tableName);
    Result mergedRegionResult = MetaTableAccessor.getRegionResult(master.getConnection(),
        mergedRegionInfo.getRegionName());
    // The merged region's meta row must carry references to both parents.
    assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
        HConstants.MERGEA_QUALIFIER) != null);
    assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
        HConstants.MERGEB_QUALIFIER) != null);
    PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(mergedRegionResult);
    HRegionInfo regionA = p.getFirst();
    HRegionInfo regionB = p.getSecond();
    FileSystem fs = master.getMasterFileSystem().getFileSystem();
    Path rootDir = master.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTable());
    Path regionAdir = new Path(tabledir, regionA.getEncodedName());
    Path regionBdir = new Path(tabledir, regionB.getEncodedName());
    assertTrue(fs.exists(regionAdir));
    assertTrue(fs.exists(regionBdir));

    HColumnDescriptor[] columnFamilies = tableDescriptor.getColumnFamilies();
    HRegionFileSystem hrfs =
        new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo);
    int count = 0;
    for (HColumnDescriptor colFamily : columnFamilies) {
      count += hrfs.getStoreFiles(colFamily.getName()).size();
    }
    admin.compactRegion(mergedRegionInfo.getRegionName());
    // Wait for the compaction to produce new store files. BUGFIX: the original
    // accumulated newcount across poll iterations instead of recomputing it, so the
    // 'newcount > count' exit could be satisfied by repeated polling alone; recompute
    // the count from scratch on every poll.
    long timeout = System.currentTimeMillis() + waitTime;
    int newcount = 0;
    while (System.currentTimeMillis() < timeout) {
      newcount = 0;
      for (HColumnDescriptor colFamily : columnFamilies) {
        newcount += hrfs.getStoreFiles(colFamily.getName()).size();
      }
      if (newcount > count) {
        break;
      }
      Thread.sleep(50);
    }
    assertTrue(newcount > count);

    // Discharge compacted files on every region server.
    List<RegionServerThread> regionServerThreads =
        TEST_UTIL.getHBaseCluster().getRegionServerThreads();
    for (RegionServerThread rs : regionServerThreads) {
      CompactedHFilesDischarger cleaner =
          new CompactedHFilesDischarger(100, null, rs.getRegionServer(), false);
      cleaner.chore();
      Thread.sleep(1000);
    }
    // Wait for the store file count to shrink back. BUGFIX: use a fresh deadline here;
    // the original reused the first deadline, which could already be expired.
    timeout = System.currentTimeMillis() + waitTime;
    while (System.currentTimeMillis() < timeout) {
      int newcount1 = 0;
      for (HColumnDescriptor colFamily : columnFamilies) {
        newcount1 += hrfs.getStoreFiles(colFamily.getName()).size();
      }
      if (newcount1 <= 1) {
        break;
      }
      Thread.sleep(50);
    }
    // Run the catalog janitor until it actually cleans something.
    int cleaned = 0;
    while (cleaned == 0) {
      cleaned = admin.runCatalogScan();
      LOG.debug("catalog janitor returned " + cleaned);
      Thread.sleep(50);
    }
    assertFalse(regionAdir.toString(), fs.exists(regionAdir));
    assertFalse(regionBdir.toString(), fs.exists(regionBdir));
    assertTrue(cleaned > 0);
    // The parent references must be gone from meta as well.
    mergedRegionResult = MetaTableAccessor.getRegionResult(master.getConnection(),
        mergedRegionInfo.getRegionName());
    assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
        HConstants.MERGEA_QUALIFIER) != null);
    assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
        HConstants.MERGEB_QUALIFIER) != null);
  } finally {
    admin.enableCatalogJanitor(true);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Merge regions of a table with replicas and verify meta reflects the change: the
 * parents (and their replicas) were in the initial layout, the merged region and its
 * replica are new, and the parents' replicas are absent from the current layout.
 */
@Test
public void testMergeWithReplicas() throws Exception {
  final TableName tableName = TableName.valueOf("testMergeWithReplicas");
  // 5 regions, 2 replicas each.
  createTableAndLoadData(master, tableName, 5, 2);
  List<Pair<HRegionInfo, ServerName>> initialRegionToServers =
      MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName);
  // Merge the regions at indices 0 and 2; 5*2 - 2 entries should remain in meta.
  PairOfSameType<HRegionInfo> mergedRegions =
      mergeRegionsAndVerifyRegionNum(master, tableName, 0, 2, 5 * 2 - 2);
  List<Pair<HRegionInfo, ServerName>> currentRegionToServers =
      MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName);
  List<HRegionInfo> initialRegions = new ArrayList<HRegionInfo>();
  for (Pair<HRegionInfo, ServerName> p : initialRegionToServers) {
    initialRegions.add(p.getFirst());
  }
  List<HRegionInfo> currentRegions = new ArrayList<HRegionInfo>();
  for (Pair<HRegionInfo, ServerName> p : currentRegionToServers) {
    currentRegions.add(p.getFirst());
  }
  // Parents and their replicas were part of the initial layout...
  assertTrue(initialRegions.contains(mergedRegions.getFirst()));
  assertTrue(initialRegions.contains(
      RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1)));
  assertTrue(initialRegions.contains(mergedRegions.getSecond()));
  assertTrue(initialRegions.contains(
      RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1)));
  // ...the merged region and its replica are new...
  assertTrue(!initialRegions.contains(currentRegions.get(0)));
  assertTrue(!initialRegions.contains(
      RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1)));
  assertTrue(currentRegions.contains(
      RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1)));
  // ...and the parents' replicas are gone from the current layout.
  assertTrue(!currentRegions.contains(
      RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1)));
  assertTrue(!currentRegions.contains(
      RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1)));
}

Class: org.apache.hadoop.hbase.regionserver.TestRegionReplicas

InternalCallVerifier EqualityVerifier 
/**
 * Opening a secondary region replica should succeed, and reads through the primary
 * table handle should still work while it is open.
 */
@Test(timeout = 60000)
public void testOpenRegionReplica() throws Exception {
  openRegion(HTU, getRS(), hriSecondary);
  try {
    HTU.loadNumericRows(table, f, 0, 1000);
    Assert.assertEquals(1000, HTU.countRows(table));
  } finally {
    // Clean up rows and close the secondary replica again.
    HTU.deleteNumericRows(table, f, 0, 1000);
    closeRegion(HTU, getRS(), hriSecondary);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After the primary compacts and the compacted-files discharger runs, the secondary
 * replica must still be able to read every row via its store file readers, and the
 * compacted (replaced) files must no longer exist on the filesystem.
 */
@Test(timeout = 300000)
public void testVerifySecondaryAbilityToReadWithOnFiles() throws Exception {
  // Disable the periodic store file refresher; this test refreshes explicitly.
  HTU.getConfiguration().setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 0);
  restartRegionServer();
  try {
    LOG.info("Opening the secondary region " + hriSecondary.getEncodedName());
    openRegion(HTU, getRS(), hriSecondary);

    // Three load+flush cycles on the primary -> three store files.
    LOG.info("Loading data to primary region");
    for (int i = 0; i < 3; ++i) {
      HTU.loadNumericRows(table, f, i * 1000, (i + 1) * 1000);
      Region region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName());
      region.flush(true);
    }
    Region primaryRegion = getRS().getFromOnlineRegions(hriPrimary.getEncodedName());
    Assert.assertEquals(3, primaryRegion.getStore(f).getStorefilesCount());

    // The secondary sees the same three files after an explicit refresh.
    Region secondaryRegion = getRS().getFromOnlineRegions(hriSecondary.getEncodedName());
    secondaryRegion.getStore(f).refreshStoreFiles();
    Assert.assertEquals(3, secondaryRegion.getStore(f).getStorefilesCount());

    LOG.info("Force Major compaction on primary region " + hriPrimary);
    primaryRegion.compact(true);
    Assert.assertEquals(1, primaryRegion.getStore(f).getStorefilesCount());

    // Find the region server hosting the primary and discharge its compacted files.
    List<RegionServerThread> regionServerThreads =
        HTU.getMiniHBaseCluster().getRegionServerThreads();
    HRegionServer hrs = null;
    for (RegionServerThread rs : regionServerThreads) {
      if (rs.getRegionServer()
          .getOnlineRegion(primaryRegion.getRegionInfo().getRegionName()) != null) {
        hrs = rs.getRegionServer();
        break;
      }
    }
    CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(100, null, hrs, false);
    cleaner.chore();

    // Walk the secondary's store files: the file paths must be gone from the
    // filesystem, yet the already-open readers must still yield every row.
    int keys = 0;
    int sum = 0;
    for (StoreFile sf : secondaryRegion.getStore(f).getStorefiles()) {
      LOG.debug(getRS().getFileSystem().exists(sf.getPath()));
      Assert.assertFalse(getRS().getFileSystem().exists(sf.getPath()));
      HFileScanner scanner = sf.getReader().getScanner(false, false);
      scanner.seekTo();
      do {
        keys++;
        Cell cell = scanner.getCell();
        sum += Integer.parseInt(
            Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
      } while (scanner.next());
    }
    Assert.assertEquals(3000, keys);
    // Sum of numeric row keys 0..2999 = 2999*3000/2 = 4498500.
    Assert.assertEquals(4498500, sum);
  } finally {
    // NOTE(review): rows were loaded into family 'f' but are deleted here from
    // CATALOG_FAMILY, exactly as in the original — confirm this is intentional.
    HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, 0, 1000);
    closeRegion(HTU, getRS(), hriSecondary);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * With the store file refresher chore enabled, the secondary replica should pick up
 * new primary flushes (and eventually the compaction result) without any explicit
 * refresh calls, while reads keep working throughout.
 */
@Test(timeout = 300000)
public void testRefreshStoreFiles() throws Exception {
  final int refreshPeriod = 2000;
  // High compaction threshold so flushes do not trigger compactions on their own.
  HTU.getConfiguration().setInt("hbase.hstore.compactionThreshold", 100);
  HTU.getConfiguration().setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
      refreshPeriod);
  restartRegionServer();
  try {
    LOG.info("Opening the secondary region " + hriSecondary.getEncodedName());
    openRegion(HTU, getRS(), hriSecondary);
    LOG.info("Loading data to primary region");
    HTU.loadNumericRows(table, f, 0, 1000);
    Assert.assertEquals(1000, HTU.countRows(table));
    LOG.info("Flushing primary region");
    Region region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName());
    region.flush(true);
    HRegion primaryRegion = (HRegion) region;

    // Give the refresher chore a few periods to notice the new file.
    LOG.info("Sleeping for " + (4 * refreshPeriod));
    Threads.sleep(4 * refreshPeriod);

    LOG.info("Checking results from secondary region replica");
    Region secondaryRegion = getRS().getFromOnlineRegions(hriSecondary.getEncodedName());
    Assert.assertEquals(1, secondaryRegion.getStore(f).getStorefilesCount());
    assertGet(secondaryRegion, 42, true);
    assertGetRpc(hriSecondary, 42, true);
    assertGetRpc(hriSecondary, 1042, false);

    // Two more load+flush cycles on the primary...
    HTU.loadNumericRows(table, f, 1000, 1100);
    region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName());
    region.flush(true);
    HTU.loadNumericRows(table, f, 2000, 2100);
    region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName());
    region.flush(true);
    Threads.sleep(4 * refreshPeriod);
    // ...should become visible on the secondary via the chore.
    assertGetRpc(hriSecondary, 42, true);
    assertGetRpc(hriSecondary, 1042, true);
    assertGetRpc(hriSecondary, 2042, true);
    Assert.assertEquals(3, secondaryRegion.getStore(f).getStorefilesCount());

    // Compact; reads through the secondary must keep working the whole time.
    HTU.compact(table.getName(), true);
    long wakeUpTime = System.currentTimeMillis() + 4 * refreshPeriod;
    while (System.currentTimeMillis() < wakeUpTime) {
      assertGetRpc(hriSecondary, 42, true);
      assertGetRpc(hriSecondary, 1042, true);
      assertGetRpc(hriSecondary, 2042, true);
      Threads.sleep(10);
    }
    Assert.assertEquals(4, secondaryRegion.getStore(f).getStorefilesCount());
  } finally {
    // NOTE(review): rows were loaded into family 'f' but are deleted here from
    // CATALOG_FAMILY, exactly as in the original — confirm this is intentional.
    HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, 0, 1000);
    closeRegion(HTU, getRS(), hriSecondary);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Reads against a secondary replica — both direct region gets and gets over RPC —
 * should see rows that were flushed on the primary.
 */
@Test(timeout = 60000)
public void testRegionReplicaGets() throws Exception {
  try {
    HTU.loadNumericRows(table, f, 0, 1000);
    Assert.assertEquals(1000, HTU.countRows(table));
    // Flush so the data reaches store files the secondary can open.
    Region region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName());
    region.flush(true);
    openRegion(HTU, getRS(), hriSecondary);
    region = getRS().getFromOnlineRegions(hriSecondary.getEncodedName());
    assertGet(region, 42, true);
    assertGetRpc(hriSecondary, 42, true);
  } finally {
    HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, 0, 1000);
    closeRegion(HTU, getRS(), hriSecondary);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * A TIMELINE-consistency Get that explicitly targets replica id 1 should be served
 * by the secondary replica and return the expected value.
 */
@Test(timeout = 60000)
public void testGetOnTargetRegionReplica() throws Exception {
  try {
    HTU.loadNumericRows(table, f, 0, 1000);
    Assert.assertEquals(1000, HTU.countRows(table));
    // Flush so the secondary replica can serve the data from store files.
    Region region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName());
    region.flush(true);
    openRegion(HTU, getRS(), hriSecondary);
    // Route the read to the secondary explicitly.
    byte[] row = Bytes.toBytes(String.valueOf(42));
    Get get = new Get(row);
    get.setConsistency(Consistency.TIMELINE);
    get.setReplicaId(1);
    Result result = table.get(get);
    Assert.assertArrayEquals(row, result.getValue(f, null));
  } finally {
    HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, 0, 1000);
    closeRegion(HTU, getRS(), hriSecondary);
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestRegionServerHostname

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier 
/**
 * For every usable (non-loopback, non-link-local, non-multicast) address on this
 * host, start a mini cluster configured with that hostname and verify each region
 * server registered itself in ZooKeeper under that hostname.
 */
@Test(timeout = 120000)
public void testRegionServerHostname() throws Exception {
  final int NUM_MASTERS = 1;
  final int NUM_RS = 1;
  Enumeration<NetworkInterface> netInterfaceList = NetworkInterface.getNetworkInterfaces();
  while (netInterfaceList.hasMoreElements()) {
    NetworkInterface ni = netInterfaceList.nextElement();
    Enumeration<InetAddress> addrList = ni.getInetAddresses();
    while (addrList.hasMoreElements()) {
      InetAddress addr = addrList.nextElement();
      // Skip addresses a region server cannot meaningfully announce itself on.
      if (addr.isLoopbackAddress() || addr.isLinkLocalAddress() || addr.isMulticastAddress()) {
        continue;
      }
      String hostName = addr.getHostName();
      LOG.info("Found " + hostName + " on " + ni);
      TEST_UTIL.getConfiguration().set(HRegionServer.MASTER_HOSTNAME_KEY, hostName);
      TEST_UTIL.getConfiguration().set(HRegionServer.RS_HOSTNAME_KEY, hostName);
      TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
      try {
        ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
        List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.rsZNode);
        // NOTE(review): expects NUM_RS + 1 znodes, as in the original — confirm which
        // extra server accounts for the +1.
        assertTrue(servers.size() == NUM_RS + 1);
        for (String server : servers) {
          assertTrue("From zookeeper: " + server + " hostname: " + hostName,
              server.startsWith(hostName.toLowerCase() + ","));
        }
        zkw.close();
      } finally {
        TEST_UTIL.shutdownMiniCluster();
      }
    }
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestRegionServerMetrics

APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Small scans should be counted by the per-region scanNext metrics and by the
 * server-wide ScanNext_num_ops counter.
 */
@Test
public void testScanNextForSmallScan() throws IOException {
  String tableNameString = "testScanNextSmall";
  TableName tableName = TableName.valueOf(tableNameString);
  byte[] cf = Bytes.toBytes("d");
  byte[] qualifier = Bytes.toBytes("qual");
  byte[] val = Bytes.toBytes("One");

  List<Put> puts = new ArrayList<>();
  for (int insertCount = 0; insertCount < 100; insertCount++) {
    Put p = new Put(Bytes.toBytes("" + insertCount + "row"));
    p.addColumn(cf, qualifier, val);
    puts.add(p);
  }
  try (Table t = TEST_UTIL.createTable(tableName, cf)) {
    t.put(puts);
    // Small scan with caching=1 so each next() is a distinct metered step.
    Scan s = new Scan();
    s.setSmall(true);
    s.setCaching(1);
    ResultScanner resultScanners = t.getScanner(s);
    for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
      Result result = resultScanners.next();
      assertNotNull(result);
      assertEquals(1, result.size());
    }
  }
  numScanNext += NUM_SCAN_NEXT;
  // Check the per-region and server-wide scanNext counters.
  try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    for (HRegionLocation location : locator.getAllRegionLocations()) {
      HRegionInfo i = location.getRegionInfo();
      MetricsRegionAggregateSource agg =
          rs.getRegion(i.getRegionName()).getMetrics().getSource().getAggregateSource();
      String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + "_table_"
          + tableNameString + "_region_" + i.getEncodedName() + "_metric";
      metricsHelper.assertCounter(prefix + "_scanNextNumOps", NUM_SCAN_NEXT, agg);
    }
    metricsHelper.assertCounter("ScanNext_num_ops", numScanNext, serverSource);
  }
  try (Admin admin = TEST_UTIL.getHBaseAdmin()) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that next() calls made by a regular (batched) scan are reflected in the
 * per-region scanNext metric and in the server-level ScanNext_num_ops counter.
 * @throws IOException on table or scan failure
 */
@Test
public void testScanNext() throws IOException {
  String tableNameString = "testScanNext";
  TableName tableName = TableName.valueOf(tableNameString);
  byte[] cf = Bytes.toBytes("d");
  byte[] qualifier = Bytes.toBytes("qual");
  byte[] val = Bytes.toBytes("One");
  List<Put> puts = new ArrayList<>();
  for (int insertCount = 0; insertCount < 100; insertCount++) {
    Put p = new Put(Bytes.toBytes("" + insertCount + "row"));
    p.addColumn(cf, qualifier, val);
    puts.add(p);
  }
  try (Table t = TEST_UTIL.createTable(tableName, cf)) {
    t.put(puts);
    Scan s = new Scan();
    s.setBatch(1);
    s.setCaching(1);
    // Close the scanner when done; the original leaked it.
    try (ResultScanner resultScanners = t.getScanner(s)) {
      for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
        Result result = resultScanners.next();
        assertNotNull(result);
        assertEquals(1, result.size());
      }
    }
  }
  // Accumulate into the class-level counter checked against the server source below.
  numScanNext += NUM_SCAN_NEXT;
  try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    for (HRegionLocation location : locator.getAllRegionLocations()) {
      HRegionInfo i = location.getRegionInfo();
      MetricsRegionAggregateSource agg =
          rs.getRegion(i.getRegionName()).getMetrics().getSource().getAggregateSource();
      String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR
          + "_table_" + tableNameString + "_region_" + i.getEncodedName() + "_metric";
      metricsHelper.assertCounter(prefix + "_scanNextNumOps", NUM_SCAN_NEXT, agg);
    }
    metricsHelper.assertCounter("ScanNext_num_ops", numScanNext, serverSource);
  }
  try (Admin admin = TEST_UTIL.getHBaseAdmin()) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port
 * for openRegion. The region server should reject this RPC. (HBASE-9721)
 */
@Test
public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception {
  // Precondition: the region is currently available on this RS.
  Assert.assertTrue(getRS().getRegion(regionName).isAvailable());
  ServerName sn=getRS().getServerName();
  // Same host and port, but start code 1 — i.e. an earlier incarnation of this server.
  ServerName earlierServerName=ServerName.valueOf(sn.getHostname(),sn.getPort(),1);
  try {
    // A close addressed to the previous incarnation must be rejected.
    CloseRegionRequest request=RequestConverter.buildCloseRegionRequest(earlierServerName,regionName);
    getRS().getRSRpcServices().closeRegion(null,request);
    Assert.fail("The closeRegion should have been rejected");
  }
 catch (  ServiceException se) {
    Assert.assertTrue(se.getCause() instanceof IOException);
    Assert.assertTrue(se.getCause().getMessage().contains("This RPC was intended for a different server"));
  }
  // Close the region for real, then verify an open addressed to the previous
  // incarnation is rejected the same way.
  closeRegionNoZK();
  try {
    AdminProtos.OpenRegionRequest orr=RequestConverter.buildOpenRegionRequest(earlierServerName,hri,null,null);
    getRS().getRSRpcServices().openRegion(null,orr);
    Assert.fail("The openRegion should have been rejected");
  }
 catch (  ServiceException se) {
    Assert.assertTrue(se.getCause() instanceof IOException);
    Assert.assertTrue(se.getCause().getMessage().contains("This RPC was intended for a different server"));
  }
 finally {
    // Restore the region so later tests see the expected open state.
    openRegion(HTU,getRS(),hri);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that if we do a close while opening it stops the opening.
 */
@Test(timeout=60000)
public void testCancelOpeningWithoutZK() throws Exception {
  closeRegionNoZK();
  checkRegionIsClosed(HTU, getRS(), hri);
  // Mark the region as "opening" in the RS's regions-in-transition map.
  getRS().getRegionsInTransitionInRS().put(hri.getEncodedNameAsBytes(), Boolean.TRUE);
  AdminProtos.CloseRegionRequest crr =
      RequestConverter.buildCloseRegionRequest(getRS().getServerName(), regionName);
  try {
    getRS().rpcServices.closeRegion(null, crr);
    // Idiomatic replacement for the original Assert.assertTrue(false).
    Assert.fail("closeRegion should have thrown while the region is opening");
  } catch (ServiceException expected) {
    // Expected: the close is rejected while the open is in flight.
  }
  // The close request should have cancelled the in-flight open (flag flips to FALSE).
  Assert.assertEquals(Boolean.FALSE,
      getRS().getRegionsInTransitionInRS().get(hri.getEncodedNameAsBytes()));
  HTableDescriptor htd = getRS().tableDescriptors.get(hri.getTable());
  getRS().service.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, -1));
  // The open handler must observe the cancellation and leave the region closed.
  checkRegionIsClosed(HTU, getRS(), hri);
  openRegion(HTU, getRS(), hri);
}

Class: org.apache.hadoop.hbase.regionserver.TestRegionServerOnlineConfigChange

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Check that the small/large compaction thread pool sizes can be changed online:
 * bump both settings in the configuration, notify all observers, and verify the
 * live CompactSplitThread reflects the new sizes.
 * @throws IOException never in practice; declared for the test framework
 */
@Test
public void testNumCompactionThreadsOnlineChange() throws IOException {
  assertTrue(rs1.compactSplitThread != null);
  final int bumpedSmall = rs1.compactSplitThread.getSmallCompactionThreadNum() + 1;
  final int bumpedLarge = rs1.compactSplitThread.getLargeCompactionThreadNum() + 1;
  conf.setInt("hbase.regionserver.thread.compaction.small", bumpedSmall);
  conf.setInt("hbase.regionserver.thread.compaction.large", bumpedLarge);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(bumpedSmall, rs1.compactSplitThread.getSmallCompactionThreadNum());
  assertEquals(bumpedLarge, rs1.compactSplitThread.getLargeCompactionThreadNum());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that the configurations in the CompactionConfiguration class change
 * properly.
 *
 * Pattern for each step: bump one compaction setting in the Configuration,
 * push it to all ConfigurationManager observers, then assert the live store's
 * compaction policy picked up the new value.
 * @throws IOException
 */
@Test
public void testCompactionConfigurationOnlineChange() throws IOException {
  String strPrefix="hbase.hstore.compaction.";
  Store s=r1.getStore(COLUMN_FAMILY1);
  if (!(s instanceof HStore)) {
    // Only the HStore implementation exposes the compaction configuration we test.
    LOG.error("Can't test the compaction configuration of HStore class. " + "Got a different implementation other than HStore");
    return;
  }
  HStore hstore=(HStore)s;
  // Compaction ratio.
  double newCompactionRatio=hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatio() + 0.1;
  conf.setFloat(strPrefix + "ratio",(float)newCompactionRatio);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(newCompactionRatio,hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatio(),0.00001);
  // Off-peak compaction ratio.
  double newOffPeakCompactionRatio=hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatioOffPeak() + 0.1;
  conf.setFloat(strPrefix + "ratio.offpeak",(float)newOffPeakCompactionRatio);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(newOffPeakCompactionRatio,hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatioOffPeak(),0.00001);
  // Compaction throttle point.
  long newThrottlePoint=hstore.getStoreEngine().getCompactionPolicy().getConf().getThrottlePoint() + 10;
  conf.setLong("hbase.regionserver.thread.compaction.throttle",newThrottlePoint);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(newThrottlePoint,hstore.getStoreEngine().getCompactionPolicy().getConf().getThrottlePoint());
  // Minimum files to compact.
  int newMinFilesToCompact=hstore.getStoreEngine().getCompactionPolicy().getConf().getMinFilesToCompact() + 1;
  conf.setLong(strPrefix + "min",newMinFilesToCompact);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(newMinFilesToCompact,hstore.getStoreEngine().getCompactionPolicy().getConf().getMinFilesToCompact());
  // Maximum files to compact.
  int newMaxFilesToCompact=hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxFilesToCompact() + 1;
  conf.setLong(strPrefix + "max",newMaxFilesToCompact);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(newMaxFilesToCompact,hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxFilesToCompact());
  // Off-peak window set to hours 6-7, so hour 4 must not be off-peak.
  conf.setLong(CompactionConfiguration.HBASE_HSTORE_OFFPEAK_START_HOUR,6);
  conf.setLong(CompactionConfiguration.HBASE_HSTORE_OFFPEAK_END_HOUR,7);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertFalse(hstore.getOffPeakHours().isOffPeakHour(4));
  // Minimum compaction size.
  long newMinCompactSize=hstore.getStoreEngine().getCompactionPolicy().getConf().getMinCompactSize() + 1;
  conf.setLong(strPrefix + "min.size",newMinCompactSize);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(newMinCompactSize,hstore.getStoreEngine().getCompactionPolicy().getConf().getMinCompactSize());
  // Maximum compaction size.
  long newMaxCompactSize=hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxCompactSize() - 1;
  conf.setLong(strPrefix + "max.size",newMaxCompactSize);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(newMaxCompactSize,hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxCompactSize());
  // Off-peak maximum compaction size.
  long newOffpeakMaxCompactSize=hstore.getStoreEngine().getCompactionPolicy().getConf().getOffPeakMaxCompactSize() - 1;
  conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY,newOffpeakMaxCompactSize);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(newOffpeakMaxCompactSize,hstore.getStoreEngine().getCompactionPolicy().getConf().getOffPeakMaxCompactSize());
  // Major compaction period.
  long newMajorCompactionPeriod=hstore.getStoreEngine().getCompactionPolicy().getConf().getMajorCompactionPeriod() + 10;
  conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD,newMajorCompactionPeriod);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(newMajorCompactionPeriod,hstore.getStoreEngine().getCompactionPolicy().getConf().getMajorCompactionPeriod());
  // Major compaction jitter.
  float newMajorCompactionJitter=hstore.getStoreEngine().getCompactionPolicy().getConf().getMajorCompactionJitter() + 0.02F;
  conf.setFloat("hbase.hregion.majorcompaction.jitter",newMajorCompactionJitter);
  rs1.getConfigurationManager().notifyAllObservers(conf);
  assertEquals(newMajorCompactionJitter,hstore.getStoreEngine().getCompactionPolicy().getConf().getMajorCompactionJitter(),0.00001);
}

Class: org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests region server reportForDuty when a backup master becomes primary master after
 * the first master goes away.
 */
@Test(timeout=180000)
public void testReportForDutyWithMasterChange() throws Exception {
  // Start one master and one region server and wait for the cluster to come online.
  cluster.getConfiguration().setInt(HConstants.MASTER_PORT, HBaseTestingUtility.randomFreePort());
  cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 2);
  cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 2);
  master = cluster.addMaster();
  rs = cluster.addRegionServer();
  LOG.debug("Starting master: " + master.getMaster().getServerName());
  master.start();
  rs.start();
  waitForClusterOnline(master);
  // Start a second region server using the instrumented RS implementation.
  cluster.getConfiguration().set(HConstants.REGION_SERVER_IMPL, MyRegionServer.class.getName());
  rs2 = cluster.addRegionServer();
  LOG.debug("Starting 2nd region server: " + rs2.getRegionServer().getServerName());
  rs2.start();
  waitForSecondRsStarted();
  // Kill the active master and bring up a replacement on a fresh port; it now
  // expects 3 servers to report for duty.
  master.getMaster().stop("Stopping master");
  cluster.getConfiguration().setInt(HConstants.MASTER_PORT, HBaseTestingUtility.randomFreePort());
  cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 3);
  cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 3);
  backupMaster = cluster.addMaster();
  LOG.debug("Starting new master: " + backupMaster.getMaster().getServerName());
  backupMaster.start();
  waitForClusterOnline(backupMaster);
  assertTrue(backupMaster.getMaster().isActiveMaster());
  assertTrue(backupMaster.getMaster().isInitialized());
  // assertEquals takes (expected, actual); the original had the arguments
  // swapped, which produces a misleading failure message.
  assertEquals(3, backupMaster.getMaster().getServerManager().getOnlineServersList().size());
}

Class: org.apache.hadoop.hbase.regionserver.TestRegionSplitPolicy

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// A region whose store reports canSplit()==false (e.g. it still holds references)
// must never split — not even when a split is forced — under either policy.
@Test
public void testForceSplitRegionWithReference() throws IOException {
  htd.setMaxFileSize(1024L);
  // Store is over the max file size but declares itself unsplittable.
  HStore mockStore=Mockito.mock(HStore.class);
  Mockito.doReturn(2000L).when(mockStore).getSize();
  Mockito.doReturn(false).when(mockStore).canSplit();
  stores.add(mockStore);
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,ConstantSizeRegionSplitPolicy.class.getName());
  ConstantSizeRegionSplitPolicy policy=(ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create(mockRegion,conf);
  assertFalse(policy.shouldSplit());
  // Even a forced split is refused while the store cannot split.
  Mockito.doReturn(true).when(mockRegion).shouldForceSplit();
  assertFalse(policy.shouldSplit());
  Mockito.doReturn(false).when(mockRegion).shouldForceSplit();
  // Same expectations for the IncreasingToUpperBound policy.
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,IncreasingToUpperBoundRegionSplitPolicy.class.getName());
  policy=(IncreasingToUpperBoundRegionSplitPolicy)RegionSplitPolicy.create(mockRegion,conf);
  assertFalse(policy.shouldSplit());
  Mockito.doReturn(true).when(mockRegion).shouldForceSplit();
  assertFalse(policy.shouldSplit());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// DelimitedKeyPrefixRegionSplitPolicy must truncate split points at the configured
// delimiter (","), both for store-derived and explicitly forced split points.
@Test
public void testDelimitedKeyPrefixRegionSplitPolicy() throws IOException {
  HTableDescriptor myHtd=new HTableDescriptor(TableName.valueOf("foobar"));
  myHtd.setValue(HTableDescriptor.SPLIT_POLICY,DelimitedKeyPrefixRegionSplitPolicy.class.getName());
  myHtd.setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY,",");
  HRegion myMockRegion=Mockito.mock(HRegion.class);
  Mockito.doReturn(myHtd).when(myMockRegion).getTableDesc();
  Mockito.doReturn(stores).when(myMockRegion).getStores();
  HStore mockStore=Mockito.mock(HStore.class);
  Mockito.doReturn(2000L).when(mockStore).getSize();
  Mockito.doReturn(true).when(mockStore).canSplit();
  Mockito.doReturn(Bytes.toBytes("ab,cd")).when(mockStore).getSplitPoint();
  stores.add(mockStore);
  DelimitedKeyPrefixRegionSplitPolicy policy=(DelimitedKeyPrefixRegionSplitPolicy)RegionSplitPolicy.create(myMockRegion,conf);
  // Store split point "ab,cd" is truncated at the delimiter to "ab".
  assertEquals("ab",Bytes.toString(policy.getSplitPoint()));
  // An explicit (forced) split point "efg,h" is truncated the same way.
  Mockito.doReturn(true).when(myMockRegion).shouldForceSplit();
  Mockito.doReturn(Bytes.toBytes("efg,h")).when(myMockRegion).getExplicitSplitPoint();
  policy=(DelimitedKeyPrefixRegionSplitPolicy)RegionSplitPolicy.create(myMockRegion,conf);
  assertEquals("efg",Bytes.toString(policy.getSplitPoint()));
  // A split point with no delimiter is used unchanged.
  Mockito.doReturn(Bytes.toBytes("ijk")).when(myMockRegion).getExplicitSplitPoint();
  assertEquals("ijk",Bytes.toString(policy.getSplitPoint()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Exercises IncreasingToUpperBoundRegionSplitPolicy: the split threshold depends on
// how many regions of the table are on this server, capped by the max file size.
@Test
public void testIncreasingToUpperBoundRegionSplitPolicy() throws IOException {
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,IncreasingToUpperBoundRegionSplitPolicy.class.getName());
  RegionServerServices rss=Mockito.mock(RegionServerServices.class);
  // This list drives the per-server region count the policy consults.
  final List regions=new ArrayList();
  Mockito.when(rss.getOnlineRegions(TABLENAME)).thenReturn(regions);
  Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss);
  long maxSplitSize=1024L;
  htd.setMaxFileSize(maxSplitSize);
  // Flush size an eighth of the max so the growing threshold starts small.
  long flushSize=maxSplitSize / 8;
  conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,flushSize);
  htd.setMemStoreFlushSize(flushSize);
  IncreasingToUpperBoundRegionSplitPolicy policy=(IncreasingToUpperBoundRegionSplitPolicy)RegionSplitPolicy.create(mockRegion,conf);
  doConstantSizePolicyTests(policy);
  // With zero regions known, any splittable store triggers a split.
  HStore mockStore=Mockito.mock(HStore.class);
  Mockito.doReturn(2000L).when(mockStore).getSize();
  Mockito.doReturn(true).when(mockStore).canSplit();
  stores.add(mockStore);
  assertTrue(policy.shouldSplit());
  // One region on the server: threshold exceeds flushSize but not 2*flushSize+1.
  regions.add(mockRegion);
  Mockito.doReturn(flushSize).when(mockStore).getSize();
  assertFalse(policy.shouldSplit());
  Mockito.doReturn(flushSize * 2 + 1).when(mockStore).getSize();
  assertTrue(policy.shouldSplit());
  // Two regions: threshold grows again; the same size no longer splits,
  // but exceeding ~1.25x the max split size does.
  regions.add(mockRegion);
  assertFalse(policy.shouldSplit());
  Mockito.doReturn((long)(maxSplitSize * 1.25 + 1)).when(mockStore).getSize();
  assertTrue(policy.shouldSplit());
  // With many (or zero) regions the size-to-check is capped at the max split
  // size, modulo configured jitter.
  assertWithinJitter(maxSplitSize,policy.getSizeToCheck(1000));
  assertWithinJitter(maxSplitSize,policy.getSizeToCheck(0));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getSplitPoint() must return null while no store can split; once splittable
 * stores exist, the split point of the largest one wins.
 */
@Test
public void testGetSplitPoint() throws IOException {
  ConstantSizeRegionSplitPolicy splitPolicy =
      (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
  // No stores registered yet: no split, no split point.
  assertFalse(splitPolicy.shouldSplit());
  assertNull(splitPolicy.getSplitPoint());
  // One splittable store: its split point is returned.
  HStore smallerStore = Mockito.mock(HStore.class);
  Mockito.doReturn(2000L).when(smallerStore).getSize();
  Mockito.doReturn(true).when(smallerStore).canSplit();
  Mockito.doReturn(Bytes.toBytes("store 1 split")).when(smallerStore).getSplitPoint();
  stores.add(smallerStore);
  assertEquals("store 1 split", Bytes.toString(splitPolicy.getSplitPoint()));
  // Adding a bigger store: its split point takes precedence.
  HStore biggerStore = Mockito.mock(HStore.class);
  Mockito.doReturn(4000L).when(biggerStore).getSize();
  Mockito.doReturn(true).when(biggerStore).canSplit();
  Mockito.doReturn(Bytes.toBytes("store 2 split")).when(biggerStore).getSplitPoint();
  stores.add(biggerStore);
  assertEquals("store 2 split", Bytes.toString(splitPolicy.getSplitPoint()));
}

Class: org.apache.hadoop.hbase.regionserver.TestResettingCounters

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Increments the same row across a flush and verifies the counters accumulate:
 * 5 increments of the odd qualifiers, a flush, 5 of the even qualifiers, then one
 * increment of all qualifiers — every qualifier must end at 6.
 * Also fixes a double close: the original called closeRegionAndWAL both in the
 * finally block and again after it.
 */
@Test
public void testResettingCounters() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  byte[] table = Bytes.toBytes("table");
  byte[][] families = new byte[][] {
      Bytes.toBytes("family1"), Bytes.toBytes("family2"), Bytes.toBytes("family3") };
  int numQualifiers = 10;
  byte[][] qualifiers = new byte[numQualifiers][];
  for (int i = 0; i < numQualifiers; i++) {
    qualifiers[i] = Bytes.toBytes("qf" + i);
  }
  int numRows = 10;
  byte[][] rows = new byte[numRows][];
  for (int i = 0; i < numRows; i++) {
    rows[i] = Bytes.toBytes("r" + i);
  }
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo hri = new HRegionInfo(htd.getTableName(), null, null, false);
  String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
  Path path = new Path(testDir);
  // Start from a clean directory.
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  Region region = HBaseTestingUtility.createRegionAndWAL(hri, path, conf, htd);
  try {
    Increment odd = new Increment(rows[0]);
    odd.setDurability(Durability.SKIP_WAL);
    Increment even = new Increment(rows[0]);
    even.setDurability(Durability.SKIP_WAL);
    Increment all = new Increment(rows[0]);
    all.setDurability(Durability.SKIP_WAL);
    for (int i = 0; i < numQualifiers; i++) {
      if (i % 2 == 0) {
        even.addColumn(families[0], qualifiers[i], 1);
      } else {
        odd.addColumn(families[0], qualifiers[i], 1);
      }
      all.addColumn(families[0], qualifiers[i], 1);
    }
    // 5 odd increments, flush (forcing a store file), 5 even increments, then
    // one increment of all qualifiers: each qualifier was incremented 6 times.
    for (int i = 0; i < 5; i++) {
      region.increment(odd, HConstants.NO_NONCE, HConstants.NO_NONCE);
    }
    region.flush(true);
    for (int i = 0; i < 5; i++) {
      region.increment(even, HConstants.NO_NONCE, HConstants.NO_NONCE);
    }
    Result result = region.increment(all, HConstants.NO_NONCE, HConstants.NO_NONCE);
    assertEquals(numQualifiers, result.size());
    Cell[] kvs = result.rawCells();
    for (int i = 0; i < kvs.length; i++) {
      System.out.println(kvs[i].toString());
      assertTrue(CellUtil.matchingQualifier(kvs[i], qualifiers[i]));
      assertEquals(6, Bytes.toLong(CellUtil.cloneValue(kvs[i])));
    }
  } finally {
    // Close exactly once; the original closed the region a second time after
    // the try/finally, double-closing the region and its WAL.
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestReversibleScanners

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
// Exercises ReversedKeyValueHeap over one memstore and two store files: forward
// setup, then backward seeks (seekToPreviousRow / backwardSeek / next) at every
// read point from 0 to MAXMVCC, checking peek() against the expected next
// readable cell computed by getNextReadableNumWithBackwardScan.
// NOTE(review): the inner loop mutates its own index `i` from the scan results,
// so `i` tracks the current row of the backward scan rather than a plain counter.
@Test
public void testReversibleKeyValueHeap() throws IOException {
  FileSystem fs=TEST_UTIL.getTestFileSystem();
  Path hfilePath=new Path(new Path(TEST_UTIL.getDataTestDir("testReversibleKeyValueHeap"),"regionname"),"familyname");
  CacheConfig cacheConf=new CacheConfig(TEST_UTIL.getConfiguration());
  HFileContextBuilder hcBuilder=new HFileContextBuilder();
  hcBuilder.withBlockSize(2 * 1024);
  HFileContext hFileContext=hcBuilder.build();
  StoreFile.Writer writer1=new StoreFile.WriterBuilder(TEST_UTIL.getConfiguration(),cacheConf,fs).withOutputDir(hfilePath).withFileContext(hFileContext).build();
  StoreFile.Writer writer2=new StoreFile.WriterBuilder(TEST_UTIL.getConfiguration(),cacheConf,fs).withOutputDir(hfilePath).withFileContext(hFileContext).build();
  MemStore memstore=new DefaultMemStore();
  // Spread the test data over the memstore and both store files.
  writeMemstoreAndStoreFiles(memstore,new StoreFile.Writer[]{writer1,writer2});
  StoreFile sf1=new StoreFile(fs,writer1.getPath(),TEST_UTIL.getConfiguration(),cacheConf,BloomType.NONE);
  StoreFile sf2=new StoreFile(fs,writer2.getPath(),TEST_UTIL.getConfiguration(),cacheConf,BloomType.NONE);
  // Sanity checks at full visibility: start mid-table, then from the last row.
  int startRowNum=ROWSIZE / 2;
  ReversedKeyValueHeap kvHeap=getReversibleKeyValueHeap(memstore,sf1,sf2,ROWS[startRowNum],MAXMVCC);
  internalTestSeekAndNextForReversibleKeyValueHeap(kvHeap,startRowNum);
  startRowNum=ROWSIZE - 1;
  kvHeap=getReversibleKeyValueHeap(memstore,sf1,sf2,HConstants.EMPTY_START_ROW,MAXMVCC);
  internalTestSeekAndNextForReversibleKeyValueHeap(kvHeap,startRowNum);
  // Repeat the backward-seek checks for every MVCC read point.
  for (int readPoint=0; readPoint < MAXMVCC; readPoint++) {
    LOG.info("Setting read point to " + readPoint);
    startRowNum=ROWSIZE - 1;
    kvHeap=getReversibleKeyValueHeap(memstore,sf1,sf2,HConstants.EMPTY_START_ROW,readPoint);
    for (int i=startRowNum; i >= 0; i--) {
      // Jump two rows back per iteration; stop when we would run off the front.
      if (i - 2 < 0) break;
      i=i - 2;
      // Seek to the row before ROWS[i+1] and verify the heap's head matches the
      // next cell readable at this read point.
      kvHeap.seekToPreviousRow(KeyValueUtil.createFirstOnRow(ROWS[i + 1]));
      Pair nextReadableNum=getNextReadableNumWithBackwardScan(i,0,readPoint);
      if (nextReadableNum == null) break;
      KeyValue expecedKey=makeKV(nextReadableNum.getFirst(),nextReadableNum.getSecond());
      assertEquals(expecedKey,kvHeap.peek());
      i=nextReadableNum.getFirst();
      int qualNum=nextReadableNum.getSecond();
      if (qualNum + 1 < QUALSIZE) {
        // backwardSeek within the same row to the next qualifier.
        kvHeap.backwardSeek(makeKV(i,qualNum + 1));
        nextReadableNum=getNextReadableNumWithBackwardScan(i,qualNum + 1,readPoint);
        if (nextReadableNum == null) break;
        expecedKey=makeKV(nextReadableNum.getFirst(),nextReadableNum.getSecond());
        assertEquals(expecedKey,kvHeap.peek());
        i=nextReadableNum.getFirst();
        qualNum=nextReadableNum.getSecond();
      }
      // next() consumes the head; compute where the scan should land afterwards
      // (previous row if we were on the last qualifier, else the next qualifier).
      kvHeap.next();
      if (qualNum + 1 >= QUALSIZE) {
        nextReadableNum=getNextReadableNumWithBackwardScan(i - 1,0,readPoint);
      }
 else {
        nextReadableNum=getNextReadableNumWithBackwardScan(i,qualNum + 1,readPoint);
      }
      if (nextReadableNum == null) break;
      expecedKey=makeKV(nextReadableNum.getFirst(),nextReadableNum.getSecond());
      assertEquals(expecedKey,kvHeap.peek());
      i=nextReadableNum.getFirst();
    }
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestScanDeleteTracker

InternalCallVerifier EqualityVerifier 
/** A DeleteColumn marker must column-delete an older cell on the same qualifier. */
@Test
public void testDeletedBy_DeleteColumn() {
  final byte[] row = Bytes.toBytes("row");
  final byte[] family = Bytes.toBytes("f");
  final byte[] qual = Bytes.toBytes("qualifier");
  sdt.add(new KeyValue(row, family, qual, timestamp, KeyValue.Type.DeleteColumn));
  timestamp -= 5;
  // An older cell of the same column is covered by the marker.
  KeyValue older = new KeyValue(row, family, qual, timestamp, KeyValue.Type.DeleteColumn);
  assertEquals(DeleteResult.COLUMN_DELETED, sdt.isDeleted(older));
}

InternalCallVerifier EqualityVerifier 
/**
 * A DeleteFamilyVersion marker deletes cells of any qualifier at exactly its
 * timestamp, and nothing at any other timestamp.
 */
@Test
public void testDeletedBy_DeleteFamilyVersion() {
  final byte[] row = Bytes.toBytes("row");
  final byte[] family = Bytes.toBytes("f");
  final byte[][] quals = {
      Bytes.toBytes("qualifier1"), Bytes.toBytes("qualifier2"),
      Bytes.toBytes("qualifier3"), Bytes.toBytes("qualifier4")
  };
  deleteType = KeyValue.Type.DeleteFamilyVersion.getCode();
  // The family-version marker carries no qualifier.
  sdt.add(new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamilyVersion));
  // Every qualifier at the marker's exact timestamp is deleted.
  for (byte[] q : quals) {
    assertEquals(DeleteResult.FAMILY_VERSION_DELETED,
        sdt.isDeleted(new KeyValue(row, family, q, timestamp, KeyValue.Type.DeleteFamilyVersion)));
  }
  // Cells at any other timestamp (before or after) survive.
  final long[] offsets = { 3, -2, -5, 8 };
  for (int idx = 0; idx < quals.length; idx++) {
    assertEquals(DeleteResult.NOT_DELETED,
        sdt.isDeleted(new KeyValue(row, family, quals[idx], timestamp + offsets[idx],
            KeyValue.Type.DeleteFamilyVersion)));
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * A version Delete at ts, then a DeleteColumn at ts-5: a still-older cell of the
 * same column must be reported COLUMN_DELETED.
 */
@Test
public void testDelete_DeleteColumn() {
  final byte[] row = Bytes.toBytes("row");
  final byte[] fam = Bytes.toBytes("f");
  final byte[] qualifier = Bytes.toBytes("qualifier");
  deleteType = KeyValue.Type.Delete.getCode();
  sdt.add(new KeyValue(row, fam, qualifier, timestamp, KeyValue.Type.Delete));
  timestamp -= 5;
  deleteType = KeyValue.Type.DeleteColumn.getCode();
  sdt.add(new KeyValue(row, fam, qualifier, timestamp, KeyValue.Type.DeleteColumn));
  timestamp -= 5;
  KeyValue probe = new KeyValue(row, fam, qualifier, timestamp, KeyValue.Type.DeleteColumn);
  assertEquals(DeleteResult.COLUMN_DELETED, sdt.isDeleted(probe));
}

InternalCallVerifier EqualityVerifier 
/** A version delete at ts=10 must not cover a cell at ts=0. */
@Test
public void testDelete_KeepVersionZero() {
  final byte[] row = Bytes.toBytes("row");
  final byte[] fam = Bytes.toBytes("f");
  final byte[] qualifier = Bytes.toBytes("qualifier");
  deleteType = KeyValue.Type.Delete.getCode();
  long deleteTimestamp = 10;
  long valueTimestamp = 0;
  sdt.reset();
  sdt.add(new KeyValue(row, fam, qualifier, deleteTimestamp, KeyValue.Type.Delete));
  // A version delete only covers its own timestamp, not ts=0.
  KeyValue survivor = new KeyValue(row, fam, qualifier, valueTimestamp, KeyValue.Type.Delete);
  assertEquals(DeleteResult.NOT_DELETED, sdt.isDeleted(survivor));
}

InternalCallVerifier EqualityVerifier 
/** A DeleteFamily marker must family-delete any older cell in the family. */
@Test
public void testDeletedBy_DeleteFamily() {
  final byte[] row = Bytes.toBytes("row");
  final byte[] fam = Bytes.toBytes("f");
  final byte[] qual = Bytes.toBytes("qualifier");
  sdt.add(new KeyValue(row, fam, qual, timestamp, KeyValue.Type.DeleteFamily));
  timestamp -= 5;
  KeyValue older = new KeyValue(row, fam, qual, timestamp, KeyValue.Type.DeleteColumn);
  assertEquals(DeleteResult.FAMILY_DELETED, sdt.isDeleted(older));
}

InternalCallVerifier EqualityVerifier 
/** Looking up a cell must not drop the delete marker the tracker holds. */
@Test
public void testDelete_KeepDelete() {
  final byte[] qualifier = Bytes.toBytes("qualifier");
  deleteType = KeyValue.Type.Delete.getCode();
  KeyValue marker = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"), qualifier,
      timestamp, KeyValue.Type.Delete);
  sdt.add(marker);
  sdt.isDeleted(marker);
  // The tracker must still hold state after the lookup.
  assertEquals(false, sdt.isEmpty());
}

InternalCallVerifier EqualityVerifier 
/**
 * A DeleteColumn on one qualifier must not mask a version Delete on a different
 * qualifier; the latter still reports VERSION_DELETED for its own cell.
 */
@Test
public void testDeleteColumn_Delete() {
  final byte[] row = Bytes.toBytes("row");
  final byte[] fam = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("qualifier");
  deleteType = KeyValue.Type.DeleteColumn.getCode();
  sdt.add(new KeyValue(row, fam, qualifier, timestamp, KeyValue.Type.DeleteColumn));
  // Switch to a different qualifier with a plain version delete.
  qualifier = Bytes.toBytes("qualifier1");
  deleteType = KeyValue.Type.Delete.getCode();
  KeyValue versionDelete = new KeyValue(row, fam, qualifier, timestamp, KeyValue.Type.Delete);
  sdt.add(versionDelete);
  assertEquals(DeleteResult.VERSION_DELETED, sdt.isDeleted(versionDelete));
}

InternalCallVerifier EqualityVerifier 
/** A version Delete marker covers the cell at its own timestamp. */
@Test
public void testDeletedBy_Delete() {
  KeyValue marker = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"),
      Bytes.toBytes("qualifier"), timestamp, KeyValue.Type.Delete);
  sdt.add(marker);
  assertEquals(DeleteResult.VERSION_DELETED, sdt.isDeleted(marker));
}

Class: org.apache.hadoop.hbase.regionserver.TestScanner

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests to do a sync flush during the middle of a scan. This is testing the StoreScanner
 * update readers code essentially. This is not highly concurrent, since its all 1 thread.
 * HBase-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.region=TEST_UTIL.createLocalHRegion(TESTTABLEDESC,null,null);
  Table hri=new RegionAsTable(region);
  try {
    LOG.info("Added: " + HBaseTestCase.addContent(hri,Bytes.toString(HConstants.CATALOG_FAMILY),Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    // Baseline count with no mid-scan flushing, then re-count with flushing
    // enabled (second arg presumably the flush interval — confirm in count());
    // the totals must agree.
    int count=count(hri,-1,false);
    assertEquals(count,count(hri,100,false));
  }
 catch (  Exception e) {
    LOG.error("Failed",e);
    throw e;
  }
 finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests to do a concurrent flush (using a 2nd thread) while scanning. This tests both
 * the StoreScanner update readers and the transition from memstore -> snapshot -> store file.
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.region=TEST_UTIL.createLocalHRegion(TESTTABLEDESC,null,null);
  Table hri=new RegionAsTable(region);
  try {
    LOG.info("Added: " + HBaseTestCase.addContent(hri,Bytes.toString(HConstants.CATALOG_FAMILY),Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    // Baseline count, then re-count with concurrent flushing (last arg true);
    // the totals must agree.
    int count=count(hri,-1,false);
    assertEquals(count,count(hri,100,true));
  }
 catch (  Exception e) {
    LOG.error("Failed",e);
    throw e;
  }
 finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
}

Class: org.apache.hadoop.hbase.regionserver.TestScannerWithBulkload

IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Bulk-loads an HFile into a fresh table, verifies a scan sees the bulk-loaded
 * "version2" cells, then puts "version3" at the same timestamp for row1, flushes,
 * and verifies the put wins (sequence-number assignment for bulk loads).
 */
@Test
public void testBulkLoad() throws Exception {
  TableName tableName = TableName.valueOf("testBulkLoad");
  long l = System.currentTimeMillis();
  Admin admin = TEST_UTIL.getHBaseAdmin();
  createTable(admin, tableName);
  Scan scan = createScan();
  final Table table = init(admin, l, scan, tableName);
  final Path hfilePath = writeToHFile(l, "/temp/testBulkLoad/", "/temp/testBulkLoad/col/file", false);
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
  final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
  try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    bulkload.doBulkLoad(hfilePath, admin, table, locator);
  }
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  result = scanAfterBulkLoad(scanner, result, "version2");
  // A put at the same timestamp should shadow the bulk-loaded cell once flushed.
  Put put0 = new Put(Bytes.toBytes("row1"));
  put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
      Bytes.toBytes("version3")));
  table.put(put0);
  admin.flush(tableName);
  // Close the first scanner before opening a new one (the original leaked it).
  scanner.close();
  scanner = table.getScanner(scan);
  result = scanner.next();
  while (result != null) {
    List<Cell> cells = result.getColumnCells(Bytes.toBytes("col"), Bytes.toBytes("q"));
    for (Cell _c : cells) {
      if (Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength()).equals("row1")) {
        System.out.println(Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength()));
        System.out.println(Bytes.toString(_c.getQualifierArray(), _c.getQualifierOffset(), _c.getQualifierLength()));
        System.out.println(Bytes.toString(_c.getValueArray(), _c.getValueOffset(), _c.getValueLength()));
        Assert.assertEquals("version3",
            Bytes.toString(_c.getValueArray(), _c.getValueOffset(), _c.getValueLength()));
      }
    }
    result = scanner.next();
  }
  scanner.close();
  table.close();
}

IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Same flow as testBulkLoad, but writeToHFile(..., true) produces a native-format HFile:
 * bulk-load it, verify "version2" on scan, overwrite with a put of "version3" at the same
 * timestamp l, flush, and assert every re-read "row1" cell carries "version3" (sequence-number
 * assignment for bulk loads is enabled so the put supersedes the loaded cell).
 */
@Test public void testBulkLoadNativeHFile() throws Exception { TableName tableName=TableName.valueOf("testBulkLoadNativeHFile"); long l=System.currentTimeMillis(); Admin admin=TEST_UTIL.getHBaseAdmin(); createTable(admin,tableName); Scan scan=createScan(); final Table table=init(admin,l,scan,tableName); final Path hfilePath=writeToHFile(l,"/temp/testBulkLoadNativeHFile/","/temp/testBulkLoadNativeHFile/col/file",true); Configuration conf=TEST_UTIL.getConfiguration(); conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers",true); final LoadIncrementalHFiles bulkload=new LoadIncrementalHFiles(conf); try (RegionLocator locator=TEST_UTIL.getConnection().getRegionLocator(tableName)){ bulkload.doBulkLoad(hfilePath,admin,table,locator); } ResultScanner scanner=table.getScanner(scan); Result result=scanner.next(); result=scanAfterBulkLoad(scanner,result,"version2"); Put put0=new Put(Bytes.toBytes("row1")); put0.add(new KeyValue(Bytes.toBytes("row1"),Bytes.toBytes("col"),Bytes.toBytes("q"),l,Bytes.toBytes("version3"))); table.put(put0); admin.flush(tableName); scanner=table.getScanner(scan); result=scanner.next(); while (result != null) { List cells=result.getColumnCells(Bytes.toBytes("col"),Bytes.toBytes("q")); for ( Cell _c : cells) { if (Bytes.toString(_c.getRowArray(),_c.getRowOffset(),_c.getRowLength()).equals("row1")) { System.out.println(Bytes.toString(_c.getRowArray(),_c.getRowOffset(),_c.getRowLength())); System.out.println(Bytes.toString(_c.getQualifierArray(),_c.getQualifierOffset(),_c.getQualifierLength())); System.out.println(Bytes.toString(_c.getValueArray(),_c.getValueOffset(),_c.getValueLength())); Assert.assertEquals("version3",Bytes.toString(_c.getValueArray(),_c.getValueOffset(),_c.getValueLength())); } } result=scanner.next(); } scanner.close(); table.close(); }

Class: org.apache.hadoop.hbase.regionserver.TestServerCustomProtocol

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Calls the ping endpoint with various start/stop row ranges and checks region coverage:
 * a null start or stop is open-ended; a region entirely before the start row or at/past the
 * stop row must be absent from the result map (asserted via assertNull on its region name),
 * while each covered region's response is checked with verifyRegionResults.
 */
@Test public void testRowRange() throws Throwable { try (Table table=util.getConnection().getTable(TEST_TABLE);RegionLocator locator=util.getConnection().getRegionLocator(TEST_TABLE)){ for ( HRegionLocation e : locator.getAllRegionLocations()) { LOG.info("Region " + e.getRegionInfo().getRegionNameAsString() + ", servername="+ e.getServerName()); } Map results=ping(table,null,ROW_A); assertEquals(1,results.size()); verifyRegionResults(locator,results,ROW_A); results=ping(table,ROW_BC,null); assertEquals(2,results.size()); HRegionLocation loc=locator.getRegionLocation(ROW_A,true); assertNull("Should be missing region for row aaa (prior to start row)",results.get(loc.getRegionInfo().getRegionName())); verifyRegionResults(locator,results,ROW_B); verifyRegionResults(locator,results,ROW_C); results=ping(table,null,ROW_BC); assertEquals(2,results.size()); verifyRegionResults(locator,results,ROW_A); verifyRegionResults(locator,results,ROW_B); loc=locator.getRegionLocation(ROW_C,true); assertNull("Should be missing region for row ccc (past stop row)",results.get(loc.getRegionInfo().getRegionName())); results=ping(table,ROW_AB,ROW_BC); assertEquals(2,results.size()); verifyRegionResults(locator,results,ROW_A); verifyRegionResults(locator,results,ROW_B); loc=locator.getRegionLocation(ROW_C,true); assertNull("Should be missing region for row ccc (past stop row)",results.get(loc.getRegionInfo().getRegionName())); results=ping(table,ROW_B,ROW_BC); assertEquals(1,results.size()); verifyRegionResults(locator,results,ROW_B); loc=locator.getRegionLocation(ROW_A,true); assertNull("Should be missing region for row aaa (prior to start)",results.get(loc.getRegionInfo().getRegionName())); loc=locator.getRegionLocation(ROW_C,true); assertNull("Should be missing region for row ccc (past stop row)",results.get(loc.getRegionInfo().getRegionName())); } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Invokes the no-op coprocessor endpoint over the ROW_A..ROW_C span and verifies that all
 * three regions respond, each with an empty (null) result value.
 */
@Test
public void testEmptyReturnType() throws Throwable {
  try (Table table = util.getConnection().getTable(TEST_TABLE)) {
    final Map regionResponses = noop(table, ROW_A, ROW_C);
    assertEquals("Should have results from three regions", 3, regionResponses.size());
    for (final Object response : regionResponses.values()) {
      assertNull(response);
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Targets the single region holding ROW_A: invokes PingService.ping via coprocessorService
 * (expecting exactly one "pong", verified with verifyRegionResults) and then the hello
 * endpoint with name "NAME", expecting one "Hello, NAME" response from the same region.
 */
@Test public void testSingleMethod() throws Throwable { try (Table table=util.getConnection().getTable(TEST_TABLE);RegionLocator locator=util.getConnection().getRegionLocator(TEST_TABLE)){ Map results=table.coprocessorService(PingProtos.PingService.class,null,ROW_A,new Batch.Call(){ @Override public String call( PingProtos.PingService instance) throws IOException { BlockingRpcCallback rpcCallback=new BlockingRpcCallback(); instance.ping(null,PingProtos.PingRequest.newBuilder().build(),rpcCallback); return rpcCallback.get().getPong(); } } ); assertEquals(1,results.size()); verifyRegionResults(locator,results,ROW_A); final String name="NAME"; results=hello(table,name,null,ROW_A); assertEquals(1,results.size()); verifyRegionResults(locator,results,"Hello, NAME",ROW_A); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises coprocessor endpoints through a single Table proxy spanning all three regions:
 * ping every region (each must answer "pong"), exercise hello() with a name / null / NOBODY,
 * read the per-region count, then increment each region's counter by a fixed diff and verify
 * every region reports count + diff.
 */
@Test public void testSingleProxy() throws Throwable {
  Table table = util.getConnection().getTable(TEST_TABLE);
  // Ping all regions; each should answer "pong".
  Map results = ping(table, null, null);
  assertEquals(3, results.size());
  for (Map.Entry e : results.entrySet()) {
    assertEquals("Invalid custom protocol response", "pong", e.getValue());
  }
  hello(table, "George", HELLO + "George");
  LOG.info("Did george");
  hello(table, null, "Who are you?");
  LOG.info("Who are you");
  hello(table, NOBODY, null);
  LOG.info(NOBODY);
  // Read the current counter value from every region.
  Map intResults = table.coprocessorService(PingProtos.PingService.class, null, null, new Batch.Call(){
    @Override public Integer call(PingProtos.PingService instance) throws IOException {
      BlockingRpcCallback rpcCallback = new BlockingRpcCallback();
      instance.count(null, PingProtos.CountRequest.newBuilder().build(), rpcCallback);
      return rpcCallback.get().getCount();
    }
  });
  int count = -1;
  for (Map.Entry e : intResults.entrySet()) {
    assertTrue(e.getValue() > 0);
    count = e.getValue();
  }
  final int diff = 5;
  // Increment every region's counter by diff and collect the new values.
  intResults = table.coprocessorService(PingProtos.PingService.class, null, null, new Batch.Call(){
    @Override public Integer call(PingProtos.PingService instance) throws IOException {
      BlockingRpcCallback rpcCallback = new BlockingRpcCallback();
      instance.increment(null, PingProtos.IncrementCountRequest.newBuilder().setDiff(diff).build(), rpcCallback);
      return rpcCallback.get().getCount();
    }
  });
  // BUG FIX: the original asserted results.size() here (already checked above); the intent is
  // clearly to verify the increment response map, so assert intResults.size() instead.
  assertEquals(3, intResults.size());
  for (Map.Entry e : intResults.entrySet()) {
    assertEquals(e.getValue().intValue(), count + diff);
  }
  table.close();
}

Class: org.apache.hadoop.hbase.regionserver.TestServerNonceManager

InternalCallVerifier BooleanVerifier 
/**
 * Drives ServerNonceManager's cleanup chore with a ManualEnvironmentEdge and a 6-tick
 * retention window (createManager(6)). A successfully-ended operation blocks reuse of its
 * nonce until the chore expires it; an operation ended with success=false may be restarted
 * immediately. Clock values 1..11 are chosen to straddle the 6-tick expiry boundary for
 * nonces 1, 2 and 3.
 */
@Test public void testCleanup() throws Exception { ManualEnvironmentEdge edge=new ManualEnvironmentEdge(); EnvironmentEdgeManager.injectEdge(edge); try { ServerNonceManager nm=createManager(6); ScheduledChore cleanup=nm.createCleanupScheduledChore(Mockito.mock(Stoppable.class)); edge.setValue(1); assertTrue(nm.startOperation(NO_NONCE,1,createStoppable())); assertTrue(nm.startOperation(NO_NONCE,2,createStoppable())); assertTrue(nm.startOperation(NO_NONCE,3,createStoppable())); edge.setValue(2); nm.endOperation(NO_NONCE,1,true); edge.setValue(4); nm.endOperation(NO_NONCE,2,true); edge.setValue(9); cleanup.choreForTesting(); assertTrue(nm.startOperation(NO_NONCE,1,createStoppable())); assertFalse(nm.startOperation(NO_NONCE,2,createStoppable())); nm.endOperation(NO_NONCE,3,false); assertTrue(nm.startOperation(NO_NONCE,3,createStoppable())); edge.setValue(11); cleanup.choreForTesting(); assertTrue(nm.startOperation(NO_NONCE,2,createStoppable())); } finally { EnvironmentEdgeManager.reset(); } }

InternalCallVerifier BooleanVerifier 
/**
 * Replays nonces from the WAL (reportOperationFromWal) with varying original write times and a
 * 6-tick retention window: startOperation must be rejected while a replayed nonce is still
 * inside the window relative to the manual clock, and accepted once the cleanup chore has
 * expired it. Nonce 3 is reported twice; the later write time (6) presumably wins -- it stays
 * blocked at clock 17 and frees up by clock 19.
 */
@Test public void testWalNonces() throws Exception { ManualEnvironmentEdge edge=new ManualEnvironmentEdge(); EnvironmentEdgeManager.injectEdge(edge); try { ServerNonceManager nm=createManager(6); ScheduledChore cleanup=nm.createCleanupScheduledChore(Mockito.mock(Stoppable.class)); edge.setValue(12); nm.reportOperationFromWal(NO_NONCE,1,8); nm.reportOperationFromWal(NO_NONCE,2,2); nm.reportOperationFromWal(NO_NONCE,3,5); nm.reportOperationFromWal(NO_NONCE,3,6); assertFalse(nm.startOperation(NO_NONCE,1,createStoppable())); assertTrue(nm.startOperation(NO_NONCE,2,createStoppable())); assertFalse(nm.startOperation(NO_NONCE,3,createStoppable())); edge.setValue(17); cleanup.choreForTesting(); assertFalse(nm.startOperation(NO_NONCE,1,createStoppable())); assertFalse(nm.startOperation(NO_NONCE,3,createStoppable())); edge.setValue(19); cleanup.choreForTesting(); assertTrue(nm.startOperation(NO_NONCE,1,createStoppable())); assertTrue(nm.startOperation(NO_NONCE,3,createStoppable())); } finally { EnvironmentEdgeManager.reset(); } }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * For every (group, nonce) pair drawn from boundary values (NO_NONCE, 1, 2, MAX_VALUE,
 * MIN_VALUE): the first startOperation always succeeds; after endOperation(..., false) the
 * same nonce may be started again; after endOperation(..., true) restarting is rejected --
 * except when the nonce itself is NO_NONCE, which is never tracked (hence the
 * assertEquals(numbers[j] == NO_NONCE, ...) in the final loop).
 */
@Test public void testNormalStartEnd() throws Exception { final long[] numbers=new long[]{NO_NONCE,1,2,Long.MAX_VALUE,Long.MIN_VALUE}; ServerNonceManager nm=createManager(); for (int i=0; i < numbers.length; ++i) { for (int j=0; j < numbers.length; ++j) { assertTrue(nm.startOperation(numbers[i],numbers[j],createStoppable())); } } for (int i=0; i < numbers.length; ++i) { assertTrue(nm.startOperation(numbers[i],NO_NONCE,createStoppable())); } for (int i=0; i < numbers.length; ++i) { for (int j=0; j < numbers.length; ++j) { nm.endOperation(numbers[i],numbers[j],false); assertTrue(nm.startOperation(numbers[i],numbers[j],createStoppable())); } } for (int i=0; i < numbers.length; ++i) { for (int j=0; j < numbers.length; ++j) { nm.endOperation(numbers[i],numbers[j],true); assertEquals(numbers[j] == NO_NONCE,nm.startOperation(numbers[i],numbers[j],createStoppable())); } } }

Class: org.apache.hadoop.hbase.regionserver.TestSplitLogWorker

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Creates maxTasks (= hbase.regionserver.wal.max.splitters) unassigned split-log task znodes,
 * starts a single SplitLogWorker, and verifies it acquires all of them: the
 * tot_wkr_task_acquired counter reaches maxTasks and each task znode is rewritten as owned
 * by server RS.
 */
@Test(timeout=60000) public void testAcquireMultiTasks() throws Exception { LOG.info("testAcquireMultiTasks"); SplitLogCounters.resetCounters(); final String TATAS="tatas"; final ServerName RS=ServerName.valueOf("rs,1,1"); final int maxTasks=3; Configuration testConf=HBaseConfiguration.create(TEST_UTIL.getConfiguration()); testConf.setInt("hbase.regionserver.wal.max.splitters",maxTasks); RegionServerServices mockedRS=getRegionServer(RS); for (int i=0; i < maxTasks; i++) { zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw,TATAS + i),new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"),this.mode).toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); } SplitLogWorker slw=new SplitLogWorker(ds,testConf,mockedRS,neverEndingTask); slw.start(); try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired,0,maxTasks,WAIT_TIME); for (int i=0; i < maxTasks; i++) { byte[] bytes=ZKUtil.getData(zkw,ZKSplitLog.getEncodedNodeName(zkw,TATAS + i)); SplitLogTask slt=SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(RS)); } } finally { stopSplitLogWorker(slw); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Worker acquires task PATH1; a second unassigned task PATH2 is then created, and PATH1 is
 * rewritten as owned by another worker (a preemption). Verifies the preempt and re-acquire
 * counters advance, that the worker's task-ready sequence reached 2, and that PATH2 ends up
 * owned by SRV.
 */
@Test(timeout=60000) public void testMultipleTasks() throws Exception { LOG.info("testMultipleTasks"); SplitLogCounters.resetCounters(); final ServerName SRV=ServerName.valueOf("tmt_svr,1,1"); final String PATH1=ZKSplitLog.getEncodedNodeName(zkw,"tmt_task"); RegionServerServices mockedRS=getRegionServer(SRV); SplitLogWorker slw=new SplitLogWorker(ds,TEST_UTIL.getConfiguration(),mockedRS,neverEndingTask); slw.start(); try { Thread.yield(); Thread.sleep(100); waitForCounter(SplitLogCounters.tot_wkr_task_grabing,0,1,WAIT_TIME); SplitLogTask unassignedManager=new SplitLogTask.Unassigned(MANAGER,this.mode); zkw.getRecoverableZooKeeper().create(PATH1,unassignedManager.toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); waitForCounter(SplitLogCounters.tot_wkr_task_acquired,0,1,WAIT_TIME); final String PATH2=ZKSplitLog.getEncodedNodeName(zkw,"tmt_task_2"); zkw.getRecoverableZooKeeper().create(PATH2,unassignedManager.toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); final ServerName anotherWorker=ServerName.valueOf("another-worker,1,1"); SplitLogTask slt=new SplitLogTask.Owned(anotherWorker,this.mode); ZKUtil.setData(zkw,PATH1,slt.toByteArray()); waitForCounter(SplitLogCounters.tot_wkr_preempt_task,0,1,WAIT_TIME); waitForCounter(SplitLogCounters.tot_wkr_task_acquired,1,2,WAIT_TIME); assertEquals(2,slw.getTaskReadySeq()); byte[] bytes=ZKUtil.getData(zkw,PATH2); slt=SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(SRV)); } finally { stopSplitLogWorker(slw); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A split-log task already present in ZooKeeper before the worker starts must be acquired on
 * startup: the acquired counter goes to 1 and the task znode is rewritten as owned by RS.
 */
@Test(timeout=60000) public void testAcquireTaskAtStartup() throws Exception { LOG.info("testAcquireTaskAtStartup"); SplitLogCounters.resetCounters(); final String TATAS="tatas"; final ServerName RS=ServerName.valueOf("rs,1,1"); RegionServerServices mockedRS=getRegionServer(RS); zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw,TATAS),new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"),this.mode).toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); SplitLogWorker slw=new SplitLogWorker(ds,TEST_UTIL.getConfiguration(),mockedRS,neverEndingTask); slw.start(); try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired,0,1,WAIT_TIME); byte[] bytes=ZKUtil.getData(zkw,ZKSplitLog.getEncodedNodeName(zkw,TATAS)); SplitLogTask slt=SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(RS)); } finally { stopSplitLogWorker(slw); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Two workers race for one unassigned task: exactly one acquires it while the other registers
 * a failed grab (either the task-owned counter or the lost-race counter fires), and the task
 * znode must end up owned by one of the two servers.
 */
@Test(timeout=60000) public void testRaceForTask() throws Exception { LOG.info("testRaceForTask"); SplitLogCounters.resetCounters(); final String TRFT="trft"; final ServerName SVR1=ServerName.valueOf("svr1,1,1"); final ServerName SVR2=ServerName.valueOf("svr2,1,1"); zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw,TRFT),new SplitLogTask.Unassigned(MANAGER,this.mode).toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); RegionServerServices mockedRS1=getRegionServer(SVR1); RegionServerServices mockedRS2=getRegionServer(SVR2); SplitLogWorker slw1=new SplitLogWorker(ds,TEST_UTIL.getConfiguration(),mockedRS1,neverEndingTask); SplitLogWorker slw2=new SplitLogWorker(ds,TEST_UTIL.getConfiguration(),mockedRS2,neverEndingTask); slw1.start(); slw2.start(); try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired,0,1,WAIT_TIME); assertTrue(waitForCounterBoolean(SplitLogCounters.tot_wkr_failed_to_grab_task_owned,0,1,WAIT_TIME,false) || SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.get() == 1); byte[] bytes=ZKUtil.getData(zkw,ZKSplitLog.getEncodedNodeName(zkw,TRFT)); SplitLogTask slt=SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(SVR1) || slt.isOwned(SVR2)); } finally { stopSplitLogWorker(slw1); stopSplitLogWorker(slw2); } }

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Forces two preemptions of an acquired task (by rewriting its znode back to Unassigned),
 * which triggers creation of a RESCAN node; verifies the rescan-acquired counter fires, that
 * the RESCAN node's task data is in DONE state for SRV, and that the splitLogZNode ends with
 * exactly two children.
 * NOTE(review): num is incremented for every child, not just RESCAN ones, so the final
 * assertEquals(2, num) checks the total child count (task + RESCAN).
 */
@Test(timeout=60000) public void testRescan() throws Exception { LOG.info("testRescan"); SplitLogCounters.resetCounters(); final ServerName SRV=ServerName.valueOf("svr,1,1"); RegionServerServices mockedRS=getRegionServer(SRV); slw=new SplitLogWorker(ds,TEST_UTIL.getConfiguration(),mockedRS,neverEndingTask); slw.start(); Thread.yield(); Thread.sleep(100); String task=ZKSplitLog.getEncodedNodeName(zkw,"task"); SplitLogTask slt=new SplitLogTask.Unassigned(MANAGER,this.mode); zkw.getRecoverableZooKeeper().create(task,slt.toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); waitForCounter(SplitLogCounters.tot_wkr_task_acquired,0,1,WAIT_TIME); ZKUtil.setData(zkw,task,slt.toByteArray()); waitForCounter(SplitLogCounters.tot_wkr_preempt_task,0,1,WAIT_TIME); String rescan=ZKSplitLog.getEncodedNodeName(zkw,"RESCAN"); rescan=zkw.getRecoverableZooKeeper().create(rescan,slt.toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT_SEQUENTIAL); waitForCounter(SplitLogCounters.tot_wkr_task_acquired,1,2,WAIT_TIME); ZKUtil.setData(zkw,task,slt.toByteArray()); waitForCounter(SplitLogCounters.tot_wkr_preempt_task,1,2,WAIT_TIME); waitForCounter(SplitLogCounters.tot_wkr_task_acquired_rescan,0,1,WAIT_TIME); List nodes=ZKUtil.listChildrenNoWatch(zkw,zkw.splitLogZNode); LOG.debug(nodes); int num=0; for ( String node : nodes) { num++; if (node.startsWith("RESCAN")) { String name=ZKSplitLog.getEncodedNodeName(zkw,node); String fn=ZKSplitLog.getFileName(name); byte[] data=ZKUtil.getData(zkw,ZKUtil.joinZNode(zkw.splitLogZNode,fn)); slt=SplitLogTask.parseFrom(data); assertTrue(slt.toString(),slt.isDone(SRV)); } } assertEquals(2,num); }

TestInitializer InternalCallVerifier ConditionMatcher HybridVerifier 
/**
 * Per-test fixture: starts a mini ZK cluster, creates fresh base/splitLog/rs znodes (each
 * creation verified via checkExists != -1), resets split-log counters, starts an
 * RS_LOG_REPLAY_OPS executor with 10 threads, and derives the recovery mode (LOG_REPLAY vs
 * LOG_SPLITTING) from the DISTRIBUTED_LOG_REPLAY_KEY configuration flag.
 */
@Before public void setup() throws Exception { TEST_UTIL.startMiniZKCluster(); Configuration conf=TEST_UTIL.getConfiguration(); zkw=new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),"split-log-worker-tests",null); ds=new DummyServer(zkw,conf); ZKUtil.deleteChildrenRecursively(zkw,zkw.baseZNode); ZKUtil.createAndFailSilent(zkw,zkw.baseZNode); assertThat(ZKUtil.checkExists(zkw,zkw.baseZNode),not(is(-1))); LOG.debug(zkw.baseZNode + " created"); ZKUtil.createAndFailSilent(zkw,zkw.splitLogZNode); assertThat(ZKUtil.checkExists(zkw,zkw.splitLogZNode),not(is(-1))); LOG.debug(zkw.splitLogZNode + " created"); ZKUtil.createAndFailSilent(zkw,zkw.rsZNode); assertThat(ZKUtil.checkExists(zkw,zkw.rsZNode),not(is(-1))); SplitLogCounters.resetCounters(); executorService=new ExecutorService("TestSplitLogWorker"); executorService.startExecutorService(ExecutorType.RS_LOG_REPLAY_OPS,10); this.mode=(conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY,false) ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Worker acquires a freshly-created task (ownership by SRV and task-ready sequence 1 are
 * verified); the manager then rewrites the znode as Owned(MANAGER), which must bump the
 * tot_wkr_preempt_task counter on the worker.
 */
@Test(timeout=60000) public void testPreemptTask() throws Exception { LOG.info("testPreemptTask"); SplitLogCounters.resetCounters(); final ServerName SRV=ServerName.valueOf("tpt_svr,1,1"); final String PATH=ZKSplitLog.getEncodedNodeName(zkw,"tpt_task"); RegionServerServices mockedRS=getRegionServer(SRV); SplitLogWorker slw=new SplitLogWorker(ds,TEST_UTIL.getConfiguration(),mockedRS,neverEndingTask); slw.start(); try { Thread.yield(); Thread.sleep(1000); waitForCounter(SplitLogCounters.tot_wkr_task_grabing,0,1,WAIT_TIME); zkw.getRecoverableZooKeeper().create(PATH,new SplitLogTask.Unassigned(MANAGER,this.mode).toByteArray(),Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); waitForCounter(SplitLogCounters.tot_wkr_task_acquired,0,1,WAIT_TIME); assertEquals(1,slw.getTaskReadySeq()); byte[] bytes=ZKUtil.getData(zkw,PATH); SplitLogTask slt=SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(SRV)); slt=new SplitLogTask.Owned(MANAGER,this.mode); ZKUtil.setData(zkw,PATH,slt.toByteArray()); waitForCounter(SplitLogCounters.tot_wkr_preempt_task,0,1,WAIT_TIME); } finally { stopSplitLogWorker(slw); } }

Class: org.apache.hadoop.hbase.regionserver.TestSplitTransaction

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Forces daughter-region creation to fail mid-split (the spied region throws
 * MockedFailedDaughterCreation from createDaughterRegionFromSplits), verifies rollback restores
 * the parent -- row count unchanged, both daughter directories absent, parent write lock
 * released -- then re-runs the same split successfully and asserts the daughters' combined row
 * count equals the original, the lock is again free, and the rollback hooks were invoked.
 */
@Test public void testRollback() throws IOException { final int rowcount=TEST_UTIL.loadRegion(this.parent,CF); assertTrue(rowcount > 0); int parentRowCount=countRows(this.parent); assertEquals(rowcount,parentRowCount); HRegion spiedRegion=spy(this.parent); SplitTransactionImpl st=prepareGOOD_SPLIT_ROW(spiedRegion); SplitTransactionImpl spiedUponSt=spy(st); doNothing().when(spiedUponSt).assertReferenceFileCount(anyInt(),eq(parent.getRegionFileSystem().getSplitsDir(st.getFirstDaughter()))); when(spiedRegion.createDaughterRegionFromSplits(spiedUponSt.getSecondDaughter())).thenThrow(new MockedFailedDaughterCreation()); boolean expectedException=false; Server mockServer=Mockito.mock(Server.class); when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); try { spiedUponSt.execute(mockServer,null); } catch ( MockedFailedDaughterCreation e) { expectedException=true; } assertTrue(expectedException); assertTrue(spiedUponSt.rollback(null,null)); int parentRowCount2=countRows(this.parent); assertEquals(parentRowCount,parentRowCount2); assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir,st.getFirstDaughter()))); assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir,st.getSecondDaughter()))); assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread()); assertTrue(st.prepare()); PairOfSameType daughters=st.execute(mockServer,null); int daughtersRowCount=0; for ( Region openRegion : daughters) { try { int count=countRows(openRegion); assertTrue(count > 0 && count != rowcount); daughtersRowCount+=count; } finally { HBaseTestingUtility.closeRegionAndWAL(openRegion); } } assertEquals(rowcount,daughtersRowCount); assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread()); assertTrue("Rollback hooks should be called.",wasRollBackHookCalled()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Stubs assertReferenceFileCount on a spied SplitTransactionImpl to throw for the second
 * daughter's directory and verifies that executing the split surfaces that IOException
 * (i.e. a reference-file-count mismatch aborts the split).
 */
@Test public void testCountReferencesFailsSplit() throws IOException { final int rowcount=TEST_UTIL.loadRegion(this.parent,CF); assertTrue(rowcount > 0); int parentRowCount=countRows(this.parent); assertEquals(rowcount,parentRowCount); HRegion spiedRegion=spy(this.parent); SplitTransactionImpl st=prepareGOOD_SPLIT_ROW(spiedRegion); SplitTransactionImpl spiedUponSt=spy(st); doThrow(new IOException("Failing split. Expected reference file count isn't equal.")).when(spiedUponSt).assertReferenceFileCount(anyInt(),eq(new Path(this.parent.getRegionFileSystem().getTableDir(),st.getSecondDaughter().getEncodedName()))); boolean expectedException=false; Server mockServer=Mockito.mock(Server.class); when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); try { spiedUponSt.execute(mockServer,null); } catch ( IOException e) { expectedException=true; } assertTrue(expectedException); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Happy-path split at GOOD_SPLIT_ROW: after execute, the parent is closed with an empty splits
 * directory, the daughters' key ranges partition the parent's range exactly at the split row,
 * the daughters' combined row count equals the parent's original count, and the parent's write
 * lock is not held. The block cache is cleared up front so the scan hits store files.
 */
@Test public void testWholesomeSplit() throws IOException { final int rowcount=TEST_UTIL.loadRegion(this.parent,CF,true); assertTrue(rowcount > 0); int parentRowCount=countRows(this.parent); assertEquals(rowcount,parentRowCount); CacheConfig cacheConf=new CacheConfig(TEST_UTIL.getConfiguration()); ((LruBlockCache)cacheConf.getBlockCache()).clearCache(); SplitTransactionImpl st=prepareGOOD_SPLIT_ROW(); Server mockServer=Mockito.mock(Server.class); when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); PairOfSameType daughters=st.execute(mockServer,null); assertTrue(this.fs.exists(this.parent.getRegionFileSystem().getSplitsDir())); assertTrue(this.parent.isClosed()); assertEquals(0,this.fs.listStatus(this.parent.getRegionFileSystem().getSplitsDir()).length); assertTrue(Bytes.equals(parent.getRegionInfo().getStartKey(),daughters.getFirst().getRegionInfo().getStartKey())); assertTrue(Bytes.equals(GOOD_SPLIT_ROW,daughters.getFirst().getRegionInfo().getEndKey())); assertTrue(Bytes.equals(daughters.getSecond().getRegionInfo().getStartKey(),GOOD_SPLIT_ROW)); assertTrue(Bytes.equals(parent.getRegionInfo().getEndKey(),daughters.getSecond().getRegionInfo().getEndKey())); int daughtersRowCount=0; for ( Region openRegion : daughters) { try { int count=countRows(openRegion); assertTrue(count > 0 && count != rowcount); daughtersRowCount+=count; } finally { HBaseTestingUtility.closeRegionAndWAL(openRegion); } } assertEquals(rowcount,daughtersRowCount); assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Fails daughter open AFTER the point-of-no-return (spied openDaughterRegion throws
 * MockedFailedDaughterOpen, surfaced as the cause of an IOException): rollback must refuse
 * (returns false) and both daughter directories must remain on disk so the master can finish
 * the split instead of undoing it.
 */
@Test public void testFailAfterPONR() throws IOException, KeeperException { final int rowcount=TEST_UTIL.loadRegion(this.parent,CF); assertTrue(rowcount > 0); int parentRowCount=countRows(this.parent); assertEquals(rowcount,parentRowCount); SplitTransactionImpl st=prepareGOOD_SPLIT_ROW(); SplitTransactionImpl spiedUponSt=spy(st); Mockito.doThrow(new MockedFailedDaughterOpen()).when(spiedUponSt).openDaughterRegion((Server)Mockito.anyObject(),(HRegion)Mockito.anyObject()); boolean expectedException=false; Server mockServer=Mockito.mock(Server.class); when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); try { spiedUponSt.execute(mockServer,null); } catch ( IOException e) { if (e.getCause() != null && e.getCause() instanceof MockedFailedDaughterOpen) { expectedException=true; } } assertTrue(expectedException); assertFalse(spiedUponSt.rollback(null,null)); Path tableDir=this.parent.getRegionFileSystem().getTableDir(); Path daughterADir=new Path(tableDir,spiedUponSt.getFirstDaughter().getEncodedName()); Path daughterBDir=new Path(tableDir,spiedUponSt.getSecondDaughter().getEncodedName()); assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterADir)); assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterBDir)); }

Class: org.apache.hadoop.hbase.regionserver.TestSplitTransactionOnCluster

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Splits a table that has region replication 2 and the SlowMeCopro coprocessor installed:
 * waits until the four post-split region instances replace the two old ones, then verifies
 * read semantics -- a STRONG-consistency Get is never stale, while a TIMELINE Get issued
 * with the primary deliberately blocked (via SlowMeCopro's latch) is answered stale.
 */
@Test public void testSplitWithRegionReplicas() throws Exception {
  final TableName tableName = TableName.valueOf("foobar");
  HTableDescriptor htd = TESTING_UTIL.createTableDescriptor("foobar");
  htd.setRegionReplication(2);
  htd.addCoprocessor(SlowMeCopro.class.getName());
  Table t = TESTING_UTIL.createTable(htd, new byte[][]{Bytes.toBytes("cf")}, null);
  List oldRegions;
  // Wait for both replicas of the single region to come online.
  do {
    oldRegions = cluster.getRegions(tableName);
    Thread.sleep(10);
  } while (oldRegions.size() != 2);
  for (HRegion h : oldRegions) LOG.debug("OLDREGION " + h.getRegionInfo());
  try {
    int regionServerIndex = cluster.getServerWith(oldRegions.get(0).getRegionInfo().getRegionName());
    HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
    insertData(tableName, admin, t);
    admin.setBalancerRunning(false, true);
    cluster.getMaster().setCatalogJanitorEnabled(false);
    boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(), tableName);
    assertEquals("The specified table should be present.", true, tableExists);
    final HRegion region = findSplittableRegion(oldRegions);
    // BUG FIX: assert non-null BEFORE dereferencing region. The original called
    // region.getRegionInfo() first, so a null region produced an NPE instead of the
    // intended "not able to find a splittable region" assertion message.
    assertTrue("not able to find a splittable region", region != null);
    regionServerIndex = cluster.getServerWith(region.getRegionInfo().getRegionName());
    regionServer = cluster.getRegionServer(regionServerIndex);
    SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2"));
    try {
      st.prepare();
      st.execute(regionServer, regionServer);
    } catch (IOException e) {
      e.printStackTrace();
      fail("Split execution should have succeeded with no exceptions thrown " + e);
    }
    List newRegions;
    // Wait until both old region instances are gone and 4 replicas (2 daughters x 2) exist.
    do {
      newRegions = cluster.getRegions(tableName);
      for (HRegion h : newRegions) LOG.debug("NEWREGION " + h.getRegionInfo());
      Thread.sleep(1000);
    } while ((newRegions.contains(oldRegions.get(0)) || newRegions.contains(oldRegions.get(1)))
        || newRegions.size() != 4);
    tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(), tableName);
    assertEquals("The specified table should be present.", true, tableExists);
    // NOTE(review): getBytes() uses the platform default charset; harmless for ASCII "row1".
    byte[] b1 = "row1".getBytes();
    Get g = new Get(b1);
    g.setConsistency(Consistency.STRONG);
    Result r = t.get(g);
    Assert.assertFalse(r.isStale());
    LOG.info("exists stale after flush done");
    // Block the primary so a TIMELINE read must be served (stale) by a replica.
    SlowMeCopro.getCdl().set(new CountDownLatch(1));
    g = new Get(b1);
    g.setConsistency(Consistency.TIMELINE);
    r = t.get(g);
    Assert.assertTrue(r.isStale());
    SlowMeCopro.getCdl().get().countDown();
  } finally {
    SlowMeCopro.getCdl().get().countDown();
    admin.setBalancerRunning(true, false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
    t.close();
  }
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Queues a major compaction, then closes and re-initializes the region so the queued
 * CompactionContext is stale: region.compact must return false and leave the store-file count
 * unchanged (no rewrite). A subsequent split at "row3" must still prepare and execute,
 * yielding two regions.
 */
@Test(timeout=60000) public void testSplitFailedCompactionAndSplit() throws Exception { final TableName tableName=TableName.valueOf("testSplitFailedCompactionAndSplit"); HTableDescriptor htd=new HTableDescriptor(tableName); byte[] cf=Bytes.toBytes("cf"); htd.addFamily(new HColumnDescriptor(cf)); admin.createTable(htd); for (int i=0; cluster.getRegions(tableName).size() == 0 && i < 100; i++) { Thread.sleep(100); } assertEquals(1,cluster.getRegions(tableName).size()); HRegion region=cluster.getRegions(tableName).get(0); Store store=region.getStore(cf); int regionServerIndex=cluster.getServerWith(region.getRegionInfo().getRegionName()); HRegionServer regionServer=cluster.getRegionServer(regionServerIndex); Table t=TESTING_UTIL.getConnection().getTable(tableName); insertData(tableName,admin,t); insertData(tableName,admin,t); int fileNum=store.getStorefiles().size(); store.triggerMajorCompaction(); CompactionContext cc=store.requestCompaction(); assertNotNull(cc); assertEquals(2,region.close(false).get(cf).size()); region.initialize(); assertFalse(region.compact(cc,store,NoLimitThroughputController.INSTANCE)); assertTrue(fileNum > store.getStorefiles().size()); SplitTransactionImpl st=new SplitTransactionImpl(region,Bytes.toBytes("row3")); assertTrue(st.prepare()); st.execute(regionServer,regionServer); assertEquals(2,cluster.getRegions(tableName).size()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs only createDaughters (the split is left incomplete) and checks the table is still
 * reported as existing while the parent and both daughters (3 regions) sit in the
 * regions-in-transition map; manually offlining all three must drain RIT to empty.
 * Balancer and catalog janitor are disabled for the duration and restored in finally.
 */
@Test(timeout=60000) public void testTableExistsIfTheSpecifiedTableRegionIsSplitParent() throws Exception { final TableName tableName=TableName.valueOf("testTableExistsIfTheSpecifiedTableRegionIsSplitParent"); Table t=createTableAndWait(tableName,Bytes.toBytes("cf")); List regions=null; try { regions=cluster.getRegions(tableName); int regionServerIndex=cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName()); HRegionServer regionServer=cluster.getRegionServer(regionServerIndex); insertData(tableName,admin,t); admin.setBalancerRunning(false,true); cluster.getMaster().setCatalogJanitorEnabled(false); boolean tableExists=MetaTableAccessor.tableExists(regionServer.getConnection(),tableName); assertEquals("The specified table should present.",true,tableExists); final HRegion region=findSplittableRegion(regions); assertTrue("not able to find a splittable region",region != null); SplitTransactionImpl st=new SplitTransactionImpl(region,Bytes.toBytes("row2")); try { st.prepare(); st.createDaughters(regionServer,regionServer,null); } catch ( IOException e) { } tableExists=MetaTableAccessor.tableExists(regionServer.getConnection(),tableName); assertEquals("The specified table should present.",true,tableExists); Map rit=cluster.getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition(); assertTrue(rit.size() == 3); cluster.getMaster().getAssignmentManager().regionOffline(st.getFirstDaughter()); cluster.getMaster().getAssignmentManager().regionOffline(st.getSecondDaughter()); cluster.getMaster().getAssignmentManager().regionOffline(region.getRegionInfo()); rit=cluster.getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition(); assertTrue(rit.size() == 0); } finally { admin.setBalancerRunning(true,false); cluster.getMaster().setCatalogJanitorEnabled(true); t.close(); TESTING_UTIL.deleteTable(tableName); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Checks that a split which fails inside a coprocessor hook (FailingSplitRegionObserver)
// is rolled back and the parent region eventually leaves the regions-in-transition map.
// Sequence: load a coprocessor that fails/blocks the split, request the split, wait on the
// observer's latch for the failure to have happened, then poll (up to 60s) until the
// parent's encoded name is no longer in RIT. Balancer/catalog janitor are disabled so
// nothing else mutates region state during the test; both are restored in finally.
@Test(timeout=60000) public void testRITStateForRollback() throws Exception { final TableName tableName=TableName.valueOf("testRITStateForRollback"); try { Table t=createTableAndWait(tableName,Bytes.toBytes("cf")); final List regions=cluster.getRegions(tableName); final HRegionInfo hri=getAndCheckSingleTableRegion(regions); insertData(tableName,admin,t); t.close(); this.admin.setBalancerRunning(false,true); cluster.getMaster().setCatalogJanitorEnabled(false); final HRegion region=findSplittableRegion(regions); assertTrue("not able to find a splittable region",region != null); region.getCoprocessorHost().load(FailingSplitRegionObserver.class,Coprocessor.PRIORITY_USER,region.getBaseConf()); this.admin.splitRegion(region.getRegionInfo().getRegionName(),new byte[]{42}); FailingSplitRegionObserver observer=(FailingSplitRegionObserver)region.getCoprocessorHost().findCoprocessor(FailingSplitRegionObserver.class.getName()); assertNotNull(observer); observer.latch.await(); LOG.info("Waiting for region to come out of RIT"); TESTING_UTIL.waitFor(60000,1000,new Waiter.Predicate(){ @Override public boolean evaluate() throws Exception { RegionStates regionStates=cluster.getMaster().getAssignmentManager().getRegionStates(); Map rit=regionStates.getRegionsInTransition(); return !rit.containsKey(hri.getEncodedName()); } } ); } finally { admin.setBalancerRunning(true,false); cluster.getMaster().setCatalogJanitorEnabled(true); TESTING_UTIL.deleteTable(tableName); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies that the server shutdown handler cleans up the daughter region directories of a
 * split that was aborted mid-flight: the split is driven only through stepsBeforePONR()
 * (leaving 3 region dirs on disk: parent + two daughters), then the region server is killed.
 * After the dead-server processing completes, only the parent's directory must remain and
 * no region may be stuck in transition.
 *
 * FIX(review): the Connection and Table used for the test writes were created but never
 * closed (resource leak); they are now managed with try-with-resources. They are only
 * needed for the initial puts, so closing them before the flush does not change behavior.
 */
@Test(timeout=300000) public void testSSHCleanupDaugtherRegionsOfAbortedSplit() throws Exception {
  TableName table=TableName.valueOf("testSSHCleanupDaugtherRegionsOfAbortedSplit");
  try {
    HTableDescriptor desc=new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
    admin.createTable(desc);
    // Write a few rows so the region has data to split around.
    try (Connection connection=ConnectionFactory.createConnection(cluster.getConfiguration());
         Table hTable=connection.getTable(desc.getTableName())) {
      for (int i=1; i < 5; i++) {
        Put p1=new Put(("r" + i).getBytes());
        p1.addColumn(Bytes.toBytes("f"),"q1".getBytes(),"v".getBytes());
        hTable.put(p1);
      }
    }
    admin.flush(desc.getTableName());
    List regions=cluster.getRegions(desc.getTableName());
    int serverWith=cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
    HRegionServer regionServer=cluster.getRegionServer(serverWith);
    // Run the split only up to (not including) the point of no return, so the daughter
    // directories exist on disk but the split is not committed in meta.
    SplitTransactionImpl st=new SplitTransactionImpl(regions.get(0),Bytes.toBytes("r3"));
    st.prepare();
    st.stepsBeforePONR(regionServer,regionServer,false);
    Path tableDir=FSUtils.getTableDir(cluster.getMaster().getMasterFileSystem().getRootDir(),desc.getTableName());
    List regionDirs=FSUtils.getRegionDirs(tableDir.getFileSystem(cluster.getConfiguration()),tableDir);
    // Parent + two uncommitted daughters.
    assertEquals(3,regionDirs.size());
    // Kill the server mid-split and wait for the master to fully process its death.
    regionServer.kill();
    while (!cluster.getMaster().getServerManager().getDeadServers().isDeadServer(regionServer.serverName)) {
      Thread.sleep(10);
    }
    while (cluster.getMaster().getServerManager().areDeadServersInProgress()) {
      Thread.sleep(10);
    }
    AssignmentManager am=cluster.getMaster().getAssignmentManager();
    assertEquals(am.getRegionStates().getRegionsInTransition().toString(),0,am.getRegionStates().getRegionsInTransition().size());
    // The shutdown handler must have removed the aborted daughters; only the parent remains.
    regionDirs=FSUtils.getRegionDirs(tableDir.getFileSystem(cluster.getConfiguration()),tableDir);
    assertEquals(1,regionDirs.size());
  } finally {
    TESTING_UTIL.deleteTable(table);
  }
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * If a table has regions that have no store files in a region, they should split successfully
 * into two regions with no store files; afterwards the split parent must reach State.SPLIT
 * and stay there through assign/unassign attempts.
 *
 * FIX(review): both assertEquals calls passed arguments as (message, actual, expected) —
 * JUnit's contract is (message, expected, actual) — so a failure would have printed a
 * misleading "expected X but was 0" message. Argument order corrected; no behavior change
 * when the assertions pass.
 */
@Test(timeout=60000) public void testSplitRegionWithNoStoreFiles() throws Exception {
  final TableName tableName=TableName.valueOf("testSplitRegionWithNoStoreFiles");
  createTableAndWait(tableName,HConstants.CATALOG_FAMILY);
  List regions=cluster.getRegions(tableName);
  HRegionInfo hri=getAndCheckSingleTableRegion(regions);
  ensureTableRegionNotOnSameServerAsMeta(admin,hri);
  int regionServerIndex=cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
  HRegionServer regionServer=cluster.getRegionServer(regionServerIndex);
  // Freeze cluster housekeeping so the hand-driven split is undisturbed.
  this.admin.setBalancerRunning(false,true);
  cluster.getMaster().setCatalogJanitorEnabled(false);
  try {
    printOutRegions(regionServer,"Initial regions: ");
    Configuration conf=cluster.getConfiguration();
    HBaseFsck.debugLsr(conf,new Path("/"));
    Path rootDir=FSUtils.getRootDir(conf);
    FileSystem fs=TESTING_UTIL.getDFSCluster().getFileSystem();
    Map storefiles=FSUtils.getTableStoreFilePathMap(null,fs,rootDir,tableName);
    // Precondition: the fresh table has no store files at all.
    assertEquals("Expected nothing but found " + storefiles.toString(),0,storefiles.size());
    regions=cluster.getRegions(tableName);
    final HRegion region=findSplittableRegion(regions);
    assertTrue("not able to find a splittable region",region != null);
    SplitTransactionImpl st=new MockedSplitTransaction(region,Bytes.toBytes("row2"));
    try {
      st.prepare();
      st.execute(regionServer,regionServer);
    } catch ( IOException e) {
      fail("Split execution should have succeeded with no exceptions thrown");
    }
    List daughters=cluster.getRegions(tableName);
    assertTrue(daughters.size() == 2);
    HBaseFsck.debugLsr(conf,new Path("/"));
    Map storefilesAfter=FSUtils.getTableStoreFilePathMap(null,fs,rootDir,tableName);
    // Splitting an empty region must not materialize any store files.
    assertEquals("Expected nothing but found " + storefilesAfter.toString(),0,storefilesAfter.size());
    hri=region.getRegionInfo();
    AssignmentManager am=cluster.getMaster().getAssignmentManager();
    RegionStates regionStates=am.getRegionStates();
    // Wait (bounded at 60s) for the parent to be marked SPLIT.
    long start=EnvironmentEdgeManager.currentTime();
    while (!regionStates.isRegionInState(hri,State.SPLIT)) {
      assertFalse("Timed out in waiting split parent to be in state SPLIT",EnvironmentEdgeManager.currentTime() - start > 60000);
      Thread.sleep(500);
    }
    // Neither assigning nor unassigning may pull a SPLIT parent back into transition.
    am.assign(hri,true);
    assertFalse("Split region can't be assigned",regionStates.isRegionInTransition(hri));
    assertTrue(regionStates.isRegionInState(hri,State.SPLIT));
    am.unassign(hri,null);
    assertFalse("Split region can't be unassigned",regionStates.isRegionInTransition(hri));
    assertTrue(regionStates.isRegionInState(hri,State.SPLIT));
  } finally {
    admin.setBalancerRunning(true,false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
// Verifies that split requests are rejected (rolled back) while the region's state is
// CLOSING: the region state is forced to CLOSING, three split requests are issued, and a
// 1s poll confirms the online-region count never changes. The region is then put back
// online and a normal split is performed to prove splitting works once the blocker is gone.
// NOTE(review): the fixed 10x100ms polling window assumes a rejected split would have
// taken effect within ~1s — confirm this is long enough on slow test machines.
@Test(timeout=300000) public void testExistingZnodeBlocksSplitAndWeRollback() throws IOException, InterruptedException, NodeExistsException, KeeperException, ServiceException { final TableName tableName=TableName.valueOf("testExistingZnodeBlocksSplitAndWeRollback"); Table t=createTableAndWait(tableName,HConstants.CATALOG_FAMILY); List regions=cluster.getRegions(tableName); HRegionInfo hri=getAndCheckSingleTableRegion(regions); int tableRegionIndex=ensureTableRegionNotOnSameServerAsMeta(admin,hri); RegionStates regionStates=cluster.getMaster().getAssignmentManager().getRegionStates(); this.admin.setBalancerRunning(false,true); cluster.getMaster().setCatalogJanitorEnabled(false); try { TESTING_UTIL.loadTable(t,HConstants.CATALOG_FAMILY,false); HRegionServer server=cluster.getRegionServer(tableRegionIndex); printOutRegions(server,"Initial regions: "); int regionCount=ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size(); regionStates.updateRegionState(hri,RegionState.State.CLOSING); this.admin.splitRegion(hri.getRegionName()); this.admin.splitRegion(hri.getRegionName()); this.admin.splitRegion(hri.getRegionName()); for (int i=0; i < 10; i++) { Thread.sleep(100); assertEquals(regionCount,ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size()); } regionStates.regionOnline(hri,server.getServerName()); split(hri,server,regionCount); checkAndGetDaughters(tableName); } finally { admin.setBalancerRunning(true,false); cluster.getMaster().setCatalogJanitorEnabled(true); t.close(); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Simulates a master "restart" during a split: MyMasterRpcServices.enabled controls a test
// hook (presumably clearing the master's in-memory new-region state, which is not persisted
// — confirm against MyMasterRpcServices) while a split is executed by hand. Despite the
// simulated state loss, the split must complete and yield exactly two daughter regions.
// The hook flag, balancer, and catalog janitor are all restored in finally.
/** * Not really restarting the master. Simulate it by clear of new region * state since it is not persisted, will be lost after master restarts. */ @Test(timeout=180000) public void testSplitAndRestartingMaster() throws Exception { LOG.info("Starting testSplitAndRestartingMaster"); final TableName tableName=TableName.valueOf("testSplitAndRestartingMaster"); createTableAndWait(tableName,HConstants.CATALOG_FAMILY); List regions=cluster.getRegions(tableName); HRegionInfo hri=getAndCheckSingleTableRegion(regions); ensureTableRegionNotOnSameServerAsMeta(admin,hri); int regionServerIndex=cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName()); HRegionServer regionServer=cluster.getRegionServer(regionServerIndex); this.admin.setBalancerRunning(false,true); cluster.getMaster().setCatalogJanitorEnabled(false); try { MyMasterRpcServices.enabled.set(true); regions=cluster.getRegions(tableName); final HRegion region=findSplittableRegion(regions); assertTrue("not able to find a splittable region",region != null); SplitTransactionImpl st=new SplitTransactionImpl(region,Bytes.toBytes("row2")); try { st.prepare(); st.execute(regionServer,regionServer); } catch ( IOException e) { fail("Split execution should have succeeded with no exceptions thrown"); } List daughters=cluster.getRegions(tableName); LOG.info("xxx " + regions.size() + AssignmentManager.TEST_SKIP_SPLIT_HANDLING); assertTrue(daughters.size() == 2); } finally { MyMasterRpcServices.enabled.set(false); admin.setBalancerRunning(true,false); cluster.getMaster().setCatalogJanitorEnabled(true); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Scenario: split a region, compact the first daughter until it holds no reference files,
// split that daughter again, then abort the region server hosting the table. The shutdown
// fixup must NOT resurrect the offlined parent just because its immediate daughter has
// itself split: after recovery, the surviving region set must exactly equal the daughters
// that existed before the crash (same count, every one present).
// NOTE(review): the 100x100ms compaction wait assumes references clear within ~10s —
// the assertFalse after the loop catches the timeout, so this is safe but slow-machine
// sensitive.
/** * Test that if daughter split on us, we won't do the shutdown handler fixup * just because we can't find the immediate daughter of an offlined parent. * @throws IOException * @throws InterruptedException */ @Test(timeout=300000) public void testShutdownFixupWhenDaughterHasSplit() throws IOException, InterruptedException { final TableName tableName=TableName.valueOf("testShutdownFixupWhenDaughterHasSplit"); Table t=createTableAndWait(tableName,HConstants.CATALOG_FAMILY); List regions=cluster.getRegions(tableName); HRegionInfo hri=getAndCheckSingleTableRegion(regions); int tableRegionIndex=ensureTableRegionNotOnSameServerAsMeta(admin,hri); this.admin.setBalancerRunning(false,true); cluster.getMaster().setCatalogJanitorEnabled(false); try { TESTING_UTIL.loadTable(t,HConstants.CATALOG_FAMILY); HRegionServer server=cluster.getRegionServer(tableRegionIndex); printOutRegions(server,"Initial regions: "); int regionCount=ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size(); split(hri,server,regionCount); List daughters=checkAndGetDaughters(tableName); regionCount=ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size(); HRegionInfo daughter=daughters.get(0).getRegionInfo(); LOG.info("Daughter we are going to split: " + daughter); this.admin.compactRegion(daughter.getRegionName()); daughters=cluster.getRegions(tableName); HRegion daughterRegion=null; for ( HRegion r : daughters) { if (r.getRegionInfo().equals(daughter)) { daughterRegion=r; LOG.info("Found matching HRI: " + daughterRegion); break; } } assertTrue(daughterRegion != null); for (int i=0; i < 100; i++) { if (!daughterRegion.hasReferences()) break; Threads.sleep(100); } assertFalse("Waiting for reference to be compacted",daughterRegion.hasReferences()); LOG.info("Daughter hri before split (has been compacted): " + daughter); split(daughter,server,regionCount); daughters=cluster.getRegions(tableName); for ( HRegion d : daughters) { LOG.info("Regions before crash: " + d); } 
cluster.abortRegionServer(tableRegionIndex); waitUntilRegionServerDead(); awaitDaughters(tableName,daughters.size()); regions=cluster.getRegions(tableName); for ( HRegion d : daughters) { LOG.info("Regions after crash: " + d); } assertEquals(daughters.size(),regions.size()); for ( HRegion r : regions) { LOG.info("Regions post crash " + r); assertTrue("Missing region post crash " + r,daughters.contains(r)); } } finally { admin.setBalancerRunning(true,false); cluster.getMaster().setCatalogJanitorEnabled(true); t.close(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Splitting a region whose store files contain only deleted cells must not throw NPE.
 * The put/flush/delete/flush loop manufactures store files with nothing but tombstoned
 * data; after a major compact, three live rows are added and the region is split at
 * "row7". The test waits for two regions to appear, then scans and expects exactly the
 * three live rows.
 *
 * FIX(review): the ResultScanner from table.getScanner(s) was never closed (resource
 * leak); it is now closed in a finally block once the rows are counted.
 */
@Test(timeout=180000) public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exception {
  TableName userTableName=TableName.valueOf("testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles");
  HTableDescriptor htd=new HTableDescriptor(userTableName);
  HColumnDescriptor hcd=new HColumnDescriptor("col");
  htd.addFamily(hcd);
  admin.createTable(htd);
  Table table=TESTING_UTIL.getConnection().getTable(userTableName);
  try {
    // Create store files that hold only deleted data: put+flush then delete+flush per row.
    for (int i=0; i <= 5; i++) {
      String row="row" + i;
      Put p=new Put(row.getBytes());
      String val="Val" + i;
      p.addColumn("col".getBytes(),"ql".getBytes(),val.getBytes());
      table.put(p);
      admin.flush(userTableName);
      Delete d=new Delete(row.getBytes());
      table.delete(d);
      admin.flush(userTableName);
    }
    admin.majorCompact(userTableName);
    List regionsOfTable=TESTING_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().getRegionsOfTable(userTableName);
    HRegionInfo hRegionInfo=regionsOfTable.get(0);
    // Three live rows so the post-split scan has something to count.
    Put p=new Put("row6".getBytes());
    p.addColumn("col".getBytes(),"ql".getBytes(),"val".getBytes());
    table.put(p);
    p=new Put("row7".getBytes());
    p.addColumn("col".getBytes(),"ql".getBytes(),"val".getBytes());
    table.put(p);
    p=new Put("row8".getBytes());
    p.addColumn("col".getBytes(),"ql".getBytes(),"val".getBytes());
    table.put(p);
    admin.flush(userTableName);
    admin.splitRegion(hRegionInfo.getRegionName(),"row7".getBytes());
    // Poll until the split is reflected in the master's region states.
    regionsOfTable=TESTING_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().getRegionsOfTable(userTableName);
    while (regionsOfTable.size() != 2) {
      Thread.sleep(2000);
      regionsOfTable=TESTING_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().getRegionsOfTable(userTableName);
    }
    Assert.assertEquals(2,regionsOfTable.size());
    Scan s=new Scan();
    ResultScanner scanner=table.getScanner(s);
    int mainTableCount=0;
    try {
      for (Result rr=scanner.next(); rr != null; rr=scanner.next()) {
        mainTableCount++;
      }
    } finally {
      scanner.close();
    }
    Assert.assertEquals(3,mainTableCount);
  } finally {
    table.close();
  }
}

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the MockedRegionObserver split hooks around the point of no return: two tables
 * are created (only the first carries the coprocessor), colocated on the same server, both
 * loaded with data; the first table is split and both tables must end up with two regions.
 *
 * FIX(review): table2 was opened on {@code firstTable} instead of {@code secondTable}
 * (copy-paste bug), so insertData(secondTable, admin, table2) was actually writing to the
 * first table and the second table stayed empty. table2 now opens secondTable.
 */
@Test(timeout=180000) public void testSplitHooksBeforeAndAfterPONR() throws Exception {
  TableName firstTable=TableName.valueOf("testSplitHooksBeforeAndAfterPONR_1");
  TableName secondTable=TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2");
  HColumnDescriptor hcd=new HColumnDescriptor("cf");
  // Only the first table carries the split-hook coprocessor.
  HTableDescriptor desc=new HTableDescriptor(firstTable);
  desc.addCoprocessor(MockedRegionObserver.class.getName());
  desc.addFamily(hcd);
  admin.createTable(desc);
  TESTING_UTIL.waitUntilAllRegionsAssigned(firstTable);
  desc=new HTableDescriptor(secondTable);
  desc.addFamily(hcd);
  admin.createTable(desc);
  TESTING_UTIL.waitUntilAllRegionsAssigned(secondTable);
  List firstTableRegions=cluster.getRegions(firstTable);
  List secondTableRegions=cluster.getRegions(secondTable);
  if (firstTableRegions.size() == 0 || secondTableRegions.size() == 0) {
    fail("Each table should have at least one region.");
  }
  // Colocate the second table's region with the first table's region.
  ServerName serverName=cluster.getServerHoldingRegion(firstTable,firstTableRegions.get(0).getRegionInfo().getRegionName());
  admin.move(secondTableRegions.get(0).getRegionInfo().getEncodedNameAsBytes(),Bytes.toBytes(serverName.getServerName()));
  Table table1=null;
  Table table2=null;
  try {
    table1=TESTING_UTIL.getConnection().getTable(firstTable);
    table2=TESTING_UTIL.getConnection().getTable(secondTable);
    insertData(firstTable,admin,table1);
    insertData(secondTable,admin,table2);
    admin.split(firstTable,"row2".getBytes());
    firstTableRegions=cluster.getRegions(firstTable);
    // Poll until the split of the first table has materialized.
    while (firstTableRegions.size() != 2) {
      Thread.sleep(1000);
      firstTableRegions=cluster.getRegions(firstTable);
    }
    assertEquals("Number of regions after split should be 2.",2,firstTableRegions.size());
    secondTableRegions=cluster.getRegions(secondTable);
    assertEquals("Number of regions after split should be 2.",2,secondTableRegions.size());
  } finally {
    if (table1 != null) {
      table1.close();
    }
    if (table2 != null) {
      table2.close();
    }
    TESTING_UTIL.deleteTable(firstTable);
    TESTING_UTIL.deleteTable(secondTable);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * With CustomSplitPolicy in effect, splitStoreFile() must skip reference creation for the
 * "f" family (split key outside range, policy does range-check) but still create a
 * reference for "i_f" (policy says to skip the range check for that family): the first
 * call returns null, the second a non-null reference path.
 *
 * FIX(review): assertEquals(storefiles.size(), 1) had expected/actual swapped — JUnit's
 * contract is assertEquals(expected, actual) — which would have produced a misleading
 * failure message. Order corrected; no behavior change when the assertion passes.
 */
@Test public void testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck() throws Exception {
  final TableName tableName=TableName.valueOf("testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck");
  try {
    HTableDescriptor htd=new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("f"));
    htd.addFamily(new HColumnDescriptor("i_f"));
    htd.setRegionSplitPolicyClassName(CustomSplitPolicy.class.getName());
    admin.createTable(htd);
    List regions=awaitTableRegions(tableName);
    HRegion region=regions.get(0);
    // Populate both families so each has exactly one flushed store file.
    for (int i=3; i < 9; i++) {
      Put p=new Put(Bytes.toBytes("row" + i));
      p.addColumn(Bytes.toBytes("f"),Bytes.toBytes("q"),Bytes.toBytes("value" + i));
      p.addColumn(Bytes.toBytes("i_f"),Bytes.toBytes("q"),Bytes.toBytes("value" + i));
      region.put(p);
    }
    region.flush(true);
    Store store=region.getStore(Bytes.toBytes("f"));
    Collection storefiles=store.getStorefiles();
    assertEquals(1,storefiles.size());
    assertFalse(region.hasReferences());
    // "row1" is below the region's written key range: the range-checking family yields no
    // reference, the skip-range-check family still gets one.
    Path referencePath=region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(),"f",storefiles.iterator().next(),Bytes.toBytes("row1"),false,region.getSplitPolicy());
    assertNull(referencePath);
    referencePath=region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(),"i_f",storefiles.iterator().next(),Bytes.toBytes("row1"),false,region.getSplitPolicy());
    assertNotNull(referencePath);
  } finally {
    TESTING_UTIL.deleteTable(tableName);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// HBASE-5806: the split completes, then the master is aborted and restarted before the
// catalog janitor can remove the split parent. After restart, the new master must report
// the parent in State.SPLIT with no hosting region server. hri.setOffline/setSplit are
// applied locally so the in-memory HRegionInfo matches what the restarted master recorded.
// NOTE(review): assertTrue(regionServerOfRegion == null) would read better as
// assertNull(...) with a message — behavior is identical.
/** * Verifies HBASE-5806. Here the case is that splitting is completed but before the * CJ could remove the parent region the master is killed and restarted. * @throws IOException * @throws InterruptedException * @throws NodeExistsException * @throws KeeperException */ @Test(timeout=300000) public void testMasterRestartAtRegionSplitPendingCatalogJanitor() throws IOException, InterruptedException, NodeExistsException, KeeperException, ServiceException { final TableName tableName=TableName.valueOf("testMasterRestartAtRegionSplitPendingCatalogJanitor"); Table t=createTableAndWait(tableName,HConstants.CATALOG_FAMILY); List regions=cluster.getRegions(tableName); HRegionInfo hri=getAndCheckSingleTableRegion(regions); int tableRegionIndex=ensureTableRegionNotOnSameServerAsMeta(admin,hri); this.admin.setBalancerRunning(false,true); cluster.getMaster().setCatalogJanitorEnabled(false); ZooKeeperWatcher zkw=new ZooKeeperWatcher(t.getConfiguration(),"testMasterRestartAtRegionSplitPendingCatalogJanitor",new UselessTestAbortable()); try { TESTING_UTIL.loadTable(t,HConstants.CATALOG_FAMILY,false); HRegionServer server=cluster.getRegionServer(tableRegionIndex); printOutRegions(server,"Initial regions: "); this.admin.splitRegion(hri.getRegionName()); checkAndGetDaughters(tableName); HMaster master=abortAndWaitForMaster(); this.admin=TESTING_UTIL.getHBaseAdmin(); hri.setOffline(true); hri.setSplit(true); RegionStates regionStates=master.getAssignmentManager().getRegionStates(); assertTrue("Split parent should be in SPLIT state",regionStates.isRegionInState(hri,State.SPLIT)); ServerName regionServerOfRegion=regionStates.getRegionServerOfRegion(hri); assertTrue(regionServerOfRegion == null); } finally { this.admin.setBalancerRunning(true,false); cluster.getMaster().setCatalogJanitorEnabled(true); t.close(); zkw.close(); } }

Class: org.apache.hadoop.hbase.regionserver.TestSplitWalDataLoss

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void test() throws IOException, InterruptedException { final HRegionServer rs=testUtil.getRSForFirstRegionInTable(tableName); final HRegion region=(HRegion)rs.getOnlineRegions(tableName).get(0); HRegion spiedRegion=spy(region); final MutableBoolean flushed=new MutableBoolean(false); final MutableBoolean reported=new MutableBoolean(false); doAnswer(new Answer(){ @Override public FlushResult answer( InvocationOnMock invocation) throws Throwable { synchronized (flushed) { flushed.setValue(true); flushed.notifyAll(); } synchronized (reported) { while (!reported.booleanValue()) { reported.wait(); } } rs.getWAL(region.getRegionInfo()).abortCacheFlush(region.getRegionInfo().getEncodedNameAsBytes()); throw new DroppedSnapshotException("testcase"); } } ).when(spiedRegion).internalFlushCacheAndCommit(Matchers.any(),Matchers.any(),Matchers.any(),Matchers.>any()); String key=null; for ( Map.Entry entry : rs.onlineRegions.entrySet()) { if (entry.getValue().getRegionInfo().getTable().equals(this.tableName)) { key=entry.getKey(); break; } } rs.onlineRegions.put(key,spiedRegion); Connection conn=testUtil.getConnection(); try (Table table=conn.getTable(tableName)){ table.put(new Put(Bytes.toBytes("row0")).addColumn(family,qualifier,Bytes.toBytes("val0"))); } long oldestSeqIdOfStore=region.getOldestSeqIdOfStore(family); LOG.info("CHANGE OLDEST " + oldestSeqIdOfStore); assertTrue(oldestSeqIdOfStore > HConstants.NO_SEQNUM); rs.cacheFlusher.requestFlush(spiedRegion,false); synchronized (flushed) { while (!flushed.booleanValue()) { flushed.wait(); } } try (Table table=conn.getTable(tableName)){ table.put(new Put(Bytes.toBytes("row1")).addColumn(family,qualifier,Bytes.toBytes("val1"))); } long now=EnvironmentEdgeManager.currentTime(); rs.tryRegionServerReport(now - 500,now); synchronized (reported) { reported.setValue(true); reported.notifyAll(); } while (testUtil.getRSForFirstRegionInTable(tableName) == rs) { Thread.sleep(100); } try (Table table=conn.getTable(tableName)){ 
Result result=table.get(new Get(Bytes.toBytes("row0"))); assertArrayEquals(Bytes.toBytes("val0"),result.getValue(family,qualifier)); } }

Class: org.apache.hadoop.hbase.regionserver.TestStore

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises updateColumnValue() while a memstore snapshot is outstanding: the increment
 * must land in the active memstore, the snapshot must flush to exactly one store file,
 * and a versioned read must return both the incremented value and the snapshotted one.
 */
@Test
public void testIncrementColumnValue_ICVDuringFlush() throws IOException, InterruptedException {
  init(this.name.getMethodName());
  long initialValue=1L;
  long bumpedValue=3L;
  // Seed qf1, snapshot the memstore, then put an unrelated qf2 cell into the active set.
  this.store.add(new KeyValue(row,family,qf1,System.currentTimeMillis(),Bytes.toBytes(initialValue)));
  this.store.snapshot();
  this.store.add(new KeyValue(row,family,qf2,System.currentTimeMillis(),Bytes.toBytes(initialValue)));
  // Increment while the snapshot is pending; a positive delta means it took effect.
  long delta=this.store.updateColumnValue(row,family,qf1,bumpedValue);
  Assert.assertTrue(delta > 0);
  flushStore(store,id++);
  // One file from the snapshot; the increment result plus the qf2 cell stay in memstore.
  Assert.assertEquals(1,this.store.getStorefiles().size());
  Assert.assertEquals(2,((DefaultMemStore)this.store.memstore).cellSet.size());
  Get probe=new Get(row);
  probe.addColumn(family,qf1);
  probe.setMaxVersions();
  List found=HBaseTestingUtility.getFromStoreFile(store,probe);
  Assert.assertEquals(2,found.size());
  long newerTs=found.get(0).getTimestamp();
  long olderTs=found.get(1).getTimestamp();
  Assert.assertTrue(newerTs > olderTs);
  // Newest version is the increment, the older one is the original snapshotted value.
  Assert.assertEquals(bumpedValue,Bytes.toLong(CellUtil.cloneValue(found.get(0))));
  Assert.assertEquals(initialValue,Bytes.toLong(CellUtil.cloneValue(found.get(1))));
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Flush failure must not leave partial store files behind: a FaultyFileSystem that cannot
// write is installed via "fs.file.impl", a flush is attempted (expected to throw an IOE
// containing "Fault injected"), and the store file count must still be zero afterwards.
// The work runs as a dedicated test user so closeAllForUGI can tear down the cached
// faulty FileSystem at the end.
@Test public void testHandleErrorsInFlush() throws Exception { LOG.info("Setting up a faulty file system that cannot write"); final Configuration conf=HBaseConfiguration.create(); User user=User.createUserForTesting(conf,"testhandleerrorsinflush",new String[]{"foo"}); conf.setClass("fs.file.impl",FaultyFileSystem.class,FileSystem.class); user.runAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { FileSystem fs=FileSystem.get(conf); Assert.assertEquals(FaultyFileSystem.class,fs.getClass()); init(name.getMethodName(),conf); LOG.info("Adding some data"); store.add(new KeyValue(row,family,qf1,1,(byte[])null)); store.add(new KeyValue(row,family,qf2,1,(byte[])null)); store.add(new KeyValue(row,family,qf3,1,(byte[])null)); LOG.info("Before flush, we should have no files"); Collection files=store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName()); Assert.assertEquals(0,files != null ? files.size() : 0); try { LOG.info("Flushing"); flush(1); Assert.fail("Didn't bubble up IOE!"); } catch ( IOException ioe) { Assert.assertTrue(ioe.getMessage().contains("Fault injected")); } LOG.info("After failed flush, we should still have no files!"); files=store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName()); Assert.assertEquals(0,files != null ? files.size() : 0); store.getHRegion().getWAL().close(); return null; } } ); FileSystem.closeAllForUGI(user.getUGI()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush: a GZ/DIFF column descriptor is
 * configured, four cells are written through a tmp writer, and reading the resulting HFile
 * back must report the same compression algorithm and block encoding.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf=HBaseConfiguration.create();
  FileSystem fs=FileSystem.get(conf);
  HColumnDescriptor hcd=new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(),conf,hcd);
  // Write a handful of cells through a writer configured from the column descriptor.
  StoreFile.Writer tmpWriter=store.createWriterInTmp(4,hcd.getCompressionType(),false,true,false);
  Path writtenPath=tmpWriter.getPath();
  tmpWriter.append(new KeyValue(row,family,qf1,Bytes.toBytes(1)));
  tmpWriter.append(new KeyValue(row,family,qf2,Bytes.toBytes(2)));
  tmpWriter.append(new KeyValue(row2,family,qf1,Bytes.toBytes(3)));
  tmpWriter.append(new KeyValue(row2,family,qf2,Bytes.toBytes(4)));
  tmpWriter.close();
  // Round-trip: the on-disk file must carry both settings.
  HFile.Reader hfileReader=HFile.createReader(fs,writtenPath,new CacheConfig(conf),conf);
  Assert.assertEquals(hcd.getCompressionType(),hfileReader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(),hfileReader.getDataBlockEncoding());
  hfileReader.close();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// hbase-1686: an empty store file (metadata only, no cells) written directly into the
// store directory must be picked up when the store is reopened — the reopened store sees
// 2 files, but a get still returns only the 1 real result.
// NOTE(review): `result`, `get`, and `qualifiers` are not declared in this method —
// presumably fields of the test class initialized elsewhere; confirm before refactoring.
/** * Test for hbase-1686. * @throws IOException */ @Test public void testEmptyStoreFile() throws IOException { init(this.name.getMethodName()); this.store.add(new KeyValue(row,family,qf1,1,(byte[])null)); this.store.add(new KeyValue(row,family,qf2,1,(byte[])null)); flush(1); StoreFile f=this.store.getStorefiles().iterator().next(); Path storedir=f.getPath().getParent(); long seqid=f.getMaxSequenceId(); Configuration c=HBaseConfiguration.create(); FileSystem fs=FileSystem.get(c); HFileContext meta=new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build(); StoreFile.Writer w=new StoreFile.WriterBuilder(c,new CacheConfig(c),fs).withOutputDir(storedir).withFileContext(meta).build(); w.appendMetadata(seqid + 1,false); w.close(); this.store.close(); this.store=new HStore(this.store.getHRegion(),this.store.getFamily(),c); Assert.assertEquals(2,this.store.getStorefilesCount()); result=HBaseTestingUtility.getFromStoreFile(store,get.getRow(),qualifiers); Assert.assertEquals(1,result.size()); }

InternalCallVerifier NullVerifier 
/**
 * Test for HBASE-3492 - Test split on empty colfam (no store files).
 * @throws IOException When the IO operations fail.
 */
@Test
public void testSplitWithEmptyColFam() throws IOException {
  init(this.name.getMethodName());
  // With no store files there is no midkey, hence no split point...
  Assert.assertNull(store.getSplitPoint());
  // ...even after a split has been explicitly forced on the region.
  store.getHRegion().forceSplit(null);
  Assert.assertNull(store.getSplitPoint());
  store.getHRegion().clearSplit();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Increment-during-snapshot combinations under a ManualEnvironmentEdge clock: after a
// snapshot+flush, repeated updateColumnValue calls at the same clock tick must collapse to
// a single memstore version (reads keep returning 2 results: newest increment + flushed
// original), and advancing the injected clock (mee.setValue(2)) before a further increment
// must still yield the same 2-version read with the latest value on top.
@Test public void testIncrementColumnValue_SnapshotFlushCombo() throws Exception { ManualEnvironmentEdge mee=new ManualEnvironmentEdge(); EnvironmentEdgeManagerTestHelper.injectEdge(mee); init(this.name.getMethodName()); long oldValue=1L; long newValue=3L; this.store.add(new KeyValue(row,family,qf1,EnvironmentEdgeManager.currentTime(),Bytes.toBytes(oldValue))); this.store.snapshot(); long ret=this.store.updateColumnValue(row,family,qf1,newValue); Assert.assertTrue(ret > 0); flushStore(store,id++); Assert.assertEquals(1,this.store.getStorefiles().size()); Assert.assertEquals(1,((DefaultMemStore)this.store.memstore).cellSet.size()); newValue+=1; this.store.updateColumnValue(row,family,qf1,newValue); newValue+=1; this.store.updateColumnValue(row,family,qf1,newValue); Get get=new Get(row); get.addColumn(family,qf1); get.setMaxVersions(); List results=new ArrayList(); results=HBaseTestingUtility.getFromStoreFile(store,get); Assert.assertEquals(2,results.size()); long ts1=results.get(0).getTimestamp(); long ts2=results.get(1).getTimestamp(); Assert.assertTrue(ts1 > ts2); Assert.assertEquals(newValue,Bytes.toLong(CellUtil.cloneValue(results.get(0)))); Assert.assertEquals(oldValue,Bytes.toLong(CellUtil.cloneValue(results.get(1)))); mee.setValue(2); newValue+=1; this.store.updateColumnValue(row,family,qf1,newValue); results=HBaseTestingUtility.getFromStoreFile(store,get); Assert.assertEquals(2,results.size()); ts1=results.get(0).getTimestamp(); ts2=results.get(1).getTimestamp(); Assert.assertTrue(ts1 > ts2); Assert.assertEquals(newValue,Bytes.toLong(CellUtil.cloneValue(results.get(0)))); Assert.assertEquals(oldValue,Bytes.toLong(CellUtil.cloneValue(results.get(1)))); }

InternalCallVerifier EqualityVerifier 
/**
 * Checks that refreshStoreFiles() reconciles the store's in-memory file list with the
 * filesystem: files added or archived behind the store's back become visible (or vanish)
 * only after a refresh, while a normal flush is visible immediately.
 */
@Test
public void testRefreshStoreFiles() throws Exception {
  init(name.getMethodName());
  assertEquals(0,this.store.getStorefilesCount());
  // A regular flush is reflected in the count right away.
  this.store.add(new KeyValue(row,family,qf1,1,(byte[])null));
  flush(1);
  assertEquals(1,this.store.getStorefilesCount());
  // A file dropped straight into the store dir is invisible until a refresh.
  addStoreFile();
  assertEquals(1,this.store.getStorefilesCount());
  store.refreshStoreFiles();
  assertEquals(2,this.store.getStorefilesCount());
  // Same for a batch of three out-of-band files.
  addStoreFile();
  addStoreFile();
  addStoreFile();
  assertEquals(2,this.store.getStorefilesCount());
  store.refreshStoreFiles();
  assertEquals(5,this.store.getStorefilesCount());
  // Archiving removes files from disk; again the store only notices on refresh.
  closeCompactedFile(0);
  archiveStoreFile(0);
  assertEquals(5,this.store.getStorefilesCount());
  store.refreshStoreFiles();
  assertEquals(4,this.store.getStorefilesCount());
  archiveStoreFile(0);
  archiveStoreFile(1);
  archiveStoreFile(2);
  assertEquals(4,this.store.getStorefilesCount());
  store.refreshStoreFiles();
  assertEquals(1,this.store.getStorefilesCount());
  // Archiving the last file brings the refreshed count back to zero.
  archiveStoreFile(0);
  store.refreshStoreFiles();
  assertEquals(0,this.store.getStorefilesCount());
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Regression check that updateColumnValue never reports a negative memstore-size delta:
// 10k increments across two rows (under a ManualEnvironmentEdge clock that ticks every
// 1000 iterations) each assert ret >= 0, and the accumulated size deltas must equal the
// heap size recomputed from the final memstore cell set via DefaultMemStore.heapSizeChange.
@Test public void testICV_negMemstoreSize() throws IOException { init(this.name.getMethodName()); long time=100; ManualEnvironmentEdge ee=new ManualEnvironmentEdge(); ee.setValue(time); EnvironmentEdgeManagerTestHelper.injectEdge(ee); long newValue=3L; long size=0; size+=this.store.add(new KeyValue(Bytes.toBytes("200909091000"),family,qf1,System.currentTimeMillis(),Bytes.toBytes(newValue))); size+=this.store.add(new KeyValue(Bytes.toBytes("200909091200"),family,qf1,System.currentTimeMillis(),Bytes.toBytes(newValue))); size+=this.store.add(new KeyValue(Bytes.toBytes("200909091300"),family,qf1,System.currentTimeMillis(),Bytes.toBytes(newValue))); size+=this.store.add(new KeyValue(Bytes.toBytes("200909091400"),family,qf1,System.currentTimeMillis(),Bytes.toBytes(newValue))); size+=this.store.add(new KeyValue(Bytes.toBytes("200909091500"),family,qf1,System.currentTimeMillis(),Bytes.toBytes(newValue))); for (int i=0; i < 10000; ++i) { newValue++; long ret=this.store.updateColumnValue(row,family,qf1,newValue); long ret2=this.store.updateColumnValue(row2,family,qf1,newValue); if (ret != 0) System.out.println("ret: " + ret); if (ret2 != 0) System.out.println("ret2: " + ret2); Assert.assertTrue("ret: " + ret,ret >= 0); size+=ret; Assert.assertTrue("ret2: " + ret2,ret2 >= 0); size+=ret2; if (i % 1000 == 0) ee.setValue(++time); } long computedSize=0; for ( Cell cell : ((DefaultMemStore)this.store.memstore).cellSet) { long kvsize=DefaultMemStore.heapSizeChange(cell,true); computedSize+=kvsize; } Assert.assertEquals(computedSize,size); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// HBase-10466 size-accounting check: with a FaultyFileSystem and a single flush retry, a
// failed flush must leave the flushable size equal to the pending edit's size (nothing
// silently dropped). After clearing the fault (ffs.fault.set(false)), one flush drains the
// snapshot (size still equals the remaining edit) and a second flush drains the rest to 0.
// Runs as a dedicated test user so the cached faulty FileSystem can be isolated.
/** * Test we do not lose data if we fail a flush and then close. * Part of HBase-10466 * @throws Exception */ @Test public void testFlushSizeAccounting() throws Exception { LOG.info("Setting up a faulty file system that cannot write in " + this.name.getMethodName()); final Configuration conf=HBaseConfiguration.create(); conf.setInt("hbase.hstore.flush.retries.number",1); User user=User.createUserForTesting(conf,this.name.getMethodName(),new String[]{"foo"}); conf.setClass("fs.file.impl",FaultyFileSystem.class,FileSystem.class); user.runAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { FileSystem fs=FileSystem.get(conf); Assert.assertEquals(FaultyFileSystem.class,fs.getClass()); FaultyFileSystem ffs=(FaultyFileSystem)fs; init(name.getMethodName(),conf); long size=store.memstore.getFlushableSize(); Assert.assertEquals(0,size); LOG.info("Adding some data"); long kvSize=store.add(new KeyValue(row,family,qf1,1,(byte[])null)); size=store.memstore.getFlushableSize(); Assert.assertEquals(kvSize,size); try { LOG.info("Flushing"); flushStore(store,id++); Assert.fail("Didn't bubble up IOE!"); } catch ( IOException ioe) { Assert.assertTrue(ioe.getMessage().contains("Fault injected")); } size=store.memstore.getFlushableSize(); Assert.assertEquals(kvSize,size); store.add(new KeyValue(row,family,qf2,2,(byte[])null)); Assert.assertEquals(kvSize,size); ffs.fault.set(false); flushStore(store,id++); size=store.memstore.getFlushableSize(); Assert.assertEquals(kvSize,size); flushStore(store,id++); size=store.memstore.getFlushableSize(); Assert.assertEquals(0,size); return null; } } ); }

InternalCallVerifier BooleanVerifier 
/**
 * Verifies the compaction-throttle setting is read, in order of precedence,
 * from the XML configuration, then the table descriptor, then the column
 * family descriptor. Each level lowers the threshold by one and re-inits the
 * store, checking throttleCompaction() flips exactly at the configured value.
 */
@Test
public void testStoreUsesConfigurationFromHcdAndHtd() throws Exception {
  final String throttleKey = "hbase.regionserver.thread.compaction.throttle";
  long threshold = 10;

  // 1) Value supplied via plain Configuration (the "xml" level).
  Configuration conf = HBaseConfiguration.create();
  conf.setLong(throttleKey, threshold);
  init(name.getMethodName() + "-xml", conf);
  Assert.assertTrue(store.throttleCompaction(threshold + 1));
  Assert.assertFalse(store.throttleCompaction(threshold));

  // 2) A table-descriptor override must win over the xml value.
  --threshold;
  HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(table));
  HColumnDescriptor familyDesc = new HColumnDescriptor(family);
  tableDesc.setConfiguration(throttleKey, Long.toString(threshold));
  init(name.getMethodName() + "-htd", conf, tableDesc, familyDesc);
  Assert.assertTrue(store.throttleCompaction(threshold + 1));
  Assert.assertFalse(store.throttleCompaction(threshold));

  // 3) A column-family override must win over the table value.
  --threshold;
  familyDesc.setConfiguration(throttleKey, Long.toString(threshold));
  init(name.getMethodName() + "-hcd", conf, tableDesc, familyDesc);
  Assert.assertTrue(store.throttleCompaction(threshold + 1));
  Assert.assertFalse(store.throttleCompaction(threshold));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Store-level timestamp test: writes one batch of KVs at ts {1,5,10,20},
// flushes it to a store file, then writes a second in-memory batch at
// ts {30,80}. Each Get with a different time range must find results when
// the range overlaps either batch, and find nothing for [90,200) which
// overlaps neither. Exercises reads that span both store files and memstore.
/** * Test to ensure correctness when using Stores with multiple timestamps * @throws IOException */ @Test public void testMultipleTimestamps() throws IOException { int numRows=1; long[] timestamps1=new long[]{1,5,10,20}; long[] timestamps2=new long[]{30,80}; init(this.name.getMethodName()); List kvList1=getKeyValueSet(timestamps1,numRows,qf1,family); for ( Cell kv : kvList1) { this.store.add(KeyValueUtil.ensureKeyValue(kv)); } this.store.snapshot(); flushStore(store,id++); List kvList2=getKeyValueSet(timestamps2,numRows,qf1,family); for ( Cell kv : kvList2) { this.store.add(KeyValueUtil.ensureKeyValue(kv)); } List result; Get get=new Get(Bytes.toBytes(1)); get.addColumn(family,qf1); get.setTimeRange(0,15); result=HBaseTestingUtility.getFromStoreFile(store,get); Assert.assertTrue(result.size() > 0); get.setTimeRange(40,90); result=HBaseTestingUtility.getFromStoreFile(store,get); Assert.assertTrue(result.size() > 0); get.setTimeRange(10,45); result=HBaseTestingUtility.getFromStoreFile(store,get); Assert.assertTrue(result.size() > 0); get.setTimeRange(80,145); result=HBaseTestingUtility.getFromStoreFile(store,get); Assert.assertTrue(result.size() > 0); get.setTimeRange(1,2); result=HBaseTestingUtility.getFromStoreFile(store,get); Assert.assertTrue(result.size() > 0); get.setTimeRange(90,200); result=HBaseTestingUtility.getFromStoreFile(store,get); Assert.assertTrue(result.size() == 0); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Checks that the lowest timestamp StoreUtils reports over the store's files
 * agrees with the lowest modification time read directly from the filesystem,
 * both before and after a compaction collapses the files.
 */
@Test
public void testLowestModificationTime() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);
  init(name.getMethodName(), conf);

  // Create four store files, each holding three cells at increasing ts.
  final int fileCount = 4;
  for (int seq = 1; seq <= fileCount; seq++) {
    LOG.info("Adding some data for the store file #" + seq);
    this.store.add(new KeyValue(row, family, qf1, seq, (byte[]) null));
    this.store.add(new KeyValue(row, family, qf2, seq, (byte[]) null));
    this.store.add(new KeyValue(row, family, qf3, seq, (byte[]) null));
    flush(seq);
  }

  long fromManager = StoreUtils.getLowestTimestamp(store.getStorefiles());
  long fromFs = getLowestTimeStampFromFS(fs, store.getStorefiles());
  Assert.assertEquals(fromManager, fromFs);

  // Compact everything and re-check: the two views must still agree.
  store.compact(store.requestCompaction(), NoLimitThroughputController.INSTANCE);
  fromManager = StoreUtils.getLowestTimestamp(store.getStorefiles());
  fromFs = getLowestTimeStampFromFS(fs, store.getStorefiles());
  Assert.assertEquals(fromManager, fromFs);
}

Class: org.apache.hadoop.hbase.regionserver.TestStoreFile

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier 
// Exercises reference (half) store files: writes a full store file, records
// its midkey and last key, then creates a top-half reference file for a
// daughter region split at the midkey. Scanning the reference must start at
// the mid row (checked on the first iteration) and the last cell seen must
// carry the original file's final row — i.e. the reference covers exactly
// the top half through the end of the parent file.
/** * Test that our mechanism of writing store files in one region to reference * store files in other regions works. * @throws IOException */ @Test public void testReference() throws IOException { final HRegionInfo hri=new HRegionInfo(TableName.valueOf("testReferenceTb")); HRegionFileSystem regionFs=HRegionFileSystem.createRegionOnFileSystem(conf,fs,new Path(testDir,hri.getTable().getNameAsString()),hri); HFileContext meta=new HFileContextBuilder().withBlockSize(8 * 1024).build(); StoreFile.Writer writer=new StoreFile.WriterBuilder(conf,cacheConf,this.fs).withFilePath(regionFs.createTempName()).withFileContext(meta).build(); writeStoreFile(writer); Path hsfPath=regionFs.commitStoreFile(TEST_FAMILY,writer.getPath()); StoreFile hsf=new StoreFile(this.fs,hsfPath,conf,cacheConf,BloomType.NONE); StoreFile.Reader reader=hsf.createReader(); Cell kv=reader.midkey(); byte[] midRow=CellUtil.cloneRow(kv); kv=reader.getLastKey(); byte[] finalRow=CellUtil.cloneRow(kv); hsf.closeReader(true); HRegionInfo splitHri=new HRegionInfo(hri.getTable(),null,midRow); Path refPath=splitStoreFile(regionFs,splitHri,TEST_FAMILY,hsf,midRow,true); StoreFile refHsf=new StoreFile(this.fs,refPath,conf,cacheConf,BloomType.NONE); HFileScanner s=refHsf.createReader().getScanner(false,false); for (boolean first=true; (!s.isSeeked() && s.seekTo()) || s.next(); ) { ByteBuffer bb=ByteBuffer.wrap(((KeyValue)s.getKey()).getKey()); kv=KeyValueUtil.createKeyValueFromKey(bb); if (first) { assertTrue(Bytes.equals(kv.getRowArray(),kv.getRowOffset(),kv.getRowLength(),midRow,0,midRow.length)); first=false; } } assertTrue(Bytes.equals(kv.getRowArray(),kv.getRowOffset(),kv.getRowLength(),finalRow,0,finalRow.length)); }

InternalCallVerifier NullVerifier 
/**
 * Test for HBASE-8012: a reseek() issued before any seek must still position
 * the scanner at the beginning of the file rather than leaving it unseeked.
 *
 * <p>Writes a store file, opens a scanner on it, reseeks to the first-on-row
 * key for the empty byte array (lowest possible key), and asserts peek()
 * yields a cell. Fix applied: corrected the assertion-message typo
 * "Intial" -> "Initial".
 */
@Test public void testReseek() throws Exception {
  Path f = new Path(ROOT_DIR, getName());
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
      .withFilePath(f).withFileContext(meta).build();
  writeStoreFile(writer);
  writer.close();
  StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
  // Lowest possible key: first-on-row of the empty row.
  KeyValue k = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY);
  StoreFileScanner s = reader.getStoreFileScanner(false, false);
  // reseek() without a prior seek must fall back to seeking from the start.
  s.reseek(k);
  assertNotNull("Initial reseek should position at the beginning of the file", s.peek());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies the data block encoding chosen at write time (FAST_DIFF) is
 * persisted into the HFile's file-info map under the
 * DATA_BLOCK_ENCODING key and can be read back from a fresh reader.
 */
@Test
public void testDataBlockEncodingMetaData() throws IOException {
  Path familyDir = new Path(new Path(testDir, "7e0102"), "familyname");
  Path filePath = new Path(familyDir, "1234567890");

  DataBlockEncoding encoding = DataBlockEncoding.FAST_DIFF;
  HFileDataBlockEncoder blockEncoder = new HFileDataBlockEncoderImpl(encoding);
  cacheConf = new CacheConfig(conf);
  HFileContext fileContext = new HFileContextBuilder()
      .withBlockSize(BLOCKSIZE_SMALL)
      .withChecksumType(CKTYPE)
      .withBytesPerCheckSum(CKBYTES)
      .withDataBlockEncoding(encoding)
      .build();

  // Write an (empty) store file with the encoding configured.
  StoreFile.Writer sfWriter = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
      .withFilePath(filePath)
      .withMaxKeyCount(2000)
      .withFileContext(fileContext)
      .build();
  sfWriter.close();

  // Re-open and confirm the encoding survived in the file info.
  StoreFile sf = new StoreFile(fs, sfWriter.getPath(), conf, cacheConf, BloomType.NONE);
  StoreFile.Reader sfReader = sf.createReader();
  Map info = sfReader.loadFileInfo();
  byte[] storedEncoding = info.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
  assertEquals(encoding.getNameInBytes(), storedEncoding);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies HFileLink files are recognized and readable: commits a real store
// file in one region, creates a link to it from a second ("test-region")
// directory, opens a StoreFile through the link's StoreFileInfo, asserts
// isLink(), and scans it end-to-end. The expected cell count
// (LAST_CHAR-FIRST_CHAR+1)^2 matches the row/qualifier grid written by
// writeStoreFile(); count starts at 1 because seekTo() positions on the
// first cell before the next() loop counts the rest.
@Test public void testHFileLink() throws IOException { final HRegionInfo hri=new HRegionInfo(TableName.valueOf("testHFileLinkTb")); Configuration testConf=new Configuration(this.conf); FSUtils.setRootDir(testConf,testDir); HRegionFileSystem regionFs=HRegionFileSystem.createRegionOnFileSystem(testConf,fs,FSUtils.getTableDir(testDir,hri.getTable()),hri); HFileContext meta=new HFileContextBuilder().withBlockSize(8 * 1024).build(); StoreFile.Writer writer=new StoreFile.WriterBuilder(conf,cacheConf,this.fs).withFilePath(regionFs.createTempName()).withFileContext(meta).build(); writeStoreFile(writer); Path storeFilePath=regionFs.commitStoreFile(TEST_FAMILY,writer.getPath()); Path dstPath=new Path(regionFs.getTableDir(),new Path("test-region",TEST_FAMILY)); HFileLink.create(testConf,this.fs,dstPath,hri,storeFilePath.getName()); Path linkFilePath=new Path(dstPath,HFileLink.createHFileLinkName(hri,storeFilePath.getName())); StoreFileInfo storeFileInfo=new StoreFileInfo(testConf,this.fs,linkFilePath); StoreFile hsf=new StoreFile(this.fs,storeFileInfo,testConf,cacheConf,BloomType.NONE); assertTrue(storeFileInfo.isLink()); int count=1; HFileScanner s=hsf.createReader().getScanner(false,false); s.seekTo(); while (s.next()) { count++; } assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1),count); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises ROWCOL and ROW bloom filters with a 1% target error rate.
// Writes every other row/column (even i, j) with 2 versions, then probes all
// rows/columns via shouldUseScanner(): cells that were written must never be
// filtered out (zero false negatives), while false positives must stay below
// 2x the expected error bound expErr[x]. For BloomType.ROW only row
// existence matters, hence shouldColExist is forced true in that case.
// The mocked Store/HColumnDescriptor supply the family name the scanner
// consults. Files are deleted per-iteration after the reader is closed.
@Test public void testBloomTypes() throws Exception { float err=(float)0.01; FileSystem fs=FileSystem.getLocal(conf); conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,err); conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED,true); int rowCount=50; int colCount=10; int versions=2; BloomType[] bt={BloomType.ROWCOL,BloomType.ROW}; int[] expKeys={rowCount * colCount,rowCount}; float[] expErr={2 * rowCount * colCount* err,2 * rowCount * 2* colCount* err}; for ( int x : new int[]{0,1}) { Path f=new Path(ROOT_DIR,getName() + x); HFileContext meta=new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build(); StoreFile.Writer writer=new StoreFile.WriterBuilder(conf,cacheConf,this.fs).withFilePath(f).withBloomType(bt[x]).withMaxKeyCount(expKeys[x]).withFileContext(meta).build(); long now=System.currentTimeMillis(); for (int i=0; i < rowCount * 2; i+=2) { for (int j=0; j < colCount * 2; j+=2) { String row=String.format(localFormatter,i); String col=String.format(localFormatter,j); for (int k=0; k < versions; ++k) { KeyValue kv=new KeyValue(row.getBytes(),"family".getBytes(),("col" + col).getBytes(),now - k,Bytes.toBytes((long)-1)); writer.append(kv); } } } writer.close(); StoreFile.Reader reader=new StoreFile.Reader(fs,f,cacheConf,conf); reader.loadFileInfo(); reader.loadBloomfilter(); StoreFileScanner scanner=reader.getStoreFileScanner(false,false); assertEquals(expKeys[x],reader.generalBloomFilter.getKeyCount()); Store store=mock(Store.class); HColumnDescriptor hcd=mock(HColumnDescriptor.class); when(hcd.getName()).thenReturn(Bytes.toBytes("family")); when(store.getFamily()).thenReturn(hcd); int falsePos=0; int falseNeg=0; for (int i=0; i < rowCount * 2; ++i) { for (int j=0; j < colCount * 2; ++j) { String row=String.format(localFormatter,i); String col=String.format(localFormatter,j); TreeSet columns=new TreeSet(Bytes.BYTES_COMPARATOR); columns.add(("col" + col).getBytes()); Scan scan=new 
Scan(row.getBytes(),row.getBytes()); scan.addColumn("family".getBytes(),("col" + col).getBytes()); boolean exists=scanner.shouldUseScanner(scan,store,Long.MIN_VALUE); boolean shouldRowExist=i % 2 == 0; boolean shouldColExist=j % 2 == 0; shouldColExist=shouldColExist || bt[x] == BloomType.ROW; if (shouldRowExist && shouldColExist) { if (!exists) falseNeg++; } else { if (exists) falsePos++; } } } reader.close(true); fs.delete(f,true); System.out.println(bt[x].toString()); System.out.println(" False negatives: " + falseNeg); System.out.println(" False positives: " + falsePos); assertEquals(0,falseNeg); assertTrue(falsePos < 2 * expErr[x]); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// StoreFile-level time-range pruning: writes KVs at ts {20,10,5,1} into one
// file, then checks shouldUseScanner() against various scan time ranges.
// Ranges overlapping the written span must keep the scanner; [27,50) with no
// column-family override must reject it; setting a per-family time range of
// [7,50) re-admits it even when the scan-level range would not. The mocked
// Store/HColumnDescriptor provide the family name the scanner looks up.
/** * Test to ensure correctness when using StoreFile with multiple timestamps * @throws IOException */ @Test public void testMultipleTimestamps() throws IOException { byte[] family=Bytes.toBytes("familyname"); byte[] qualifier=Bytes.toBytes("qualifier"); int numRows=10; long[] timestamps=new long[]{20,10,5,1}; Scan scan=new Scan(); Path storedir=new Path(new Path(testDir,"7e0102"),Bytes.toString(family)); Path dir=new Path(storedir,"1234567890"); HFileContext meta=new HFileContextBuilder().withBlockSize(8 * 1024).build(); StoreFile.Writer writer=new StoreFile.WriterBuilder(conf,cacheConf,this.fs).withOutputDir(dir).withFileContext(meta).build(); List kvList=getKeyValueSet(timestamps,numRows,qualifier,family); for ( KeyValue kv : kvList) { writer.append(kv); } writer.appendMetadata(0,false); writer.close(); StoreFile hsf=new StoreFile(this.fs,writer.getPath(),conf,cacheConf,BloomType.NONE); Store store=mock(Store.class); HColumnDescriptor hcd=mock(HColumnDescriptor.class); when(hcd.getName()).thenReturn(family); when(store.getFamily()).thenReturn(hcd); StoreFile.Reader reader=hsf.createReader(); StoreFileScanner scanner=reader.getStoreFileScanner(false,false); TreeSet columns=new TreeSet(Bytes.BYTES_COMPARATOR); columns.add(qualifier); scan.setTimeRange(20,100); assertTrue(scanner.shouldUseScanner(scan,store,Long.MIN_VALUE)); scan.setTimeRange(1,2); assertTrue(scanner.shouldUseScanner(scan,store,Long.MIN_VALUE)); scan.setTimeRange(8,10); assertTrue(scanner.shouldUseScanner(scan,store,Long.MIN_VALUE)); scan.setColumnFamilyTimeRange(family,7,50); assertTrue(scanner.shouldUseScanner(scan,store,Long.MIN_VALUE)); scan=new Scan(); scan.setTimeRange(27,50); assertTrue(!scanner.shouldUseScanner(scan,store,Long.MIN_VALUE)); scan=new Scan(); scan.setTimeRange(27,50); scan.setColumnFamilyTimeRange(family,7,50); assertTrue(scanner.shouldUseScanner(scan,store,Long.MIN_VALUE)); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// End-to-end block-cache accounting for cache-on-write and evict-on-close.
// Phase 1: write with CACHE_BLOCKS_ON_WRITE off -> first scan misses all 3
// blocks. Phase 2: write with it on -> first scan hits all 3. Phase 3: scan
// both files in lockstep comparing every cell (keys and values must match
// byte-for-byte) -> 6 more hits. Phase 4: close a reader with
// hbase.rs.evictblocksonclose=true -> 3 evictions; with it false -> stats
// unchanged. The startHit/startMiss/startEvicted counters are advanced after
// each phase so every assertion checks only that phase's delta; statement
// order is therefore load-bearing throughout.
@Test public void testCacheOnWriteEvictOnClose() throws Exception { Configuration conf=this.conf; Path baseDir=new Path(new Path(testDir,"7e0102"),"twoCOWEOC"); BlockCache bc=new CacheConfig(conf).getBlockCache(); assertNotNull(bc); CacheStats cs=bc.getStats(); long startHit=cs.getHitCount(); long startMiss=cs.getMissCount(); long startEvicted=cs.getEvictedCount(); conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,false); CacheConfig cacheConf=new CacheConfig(conf); Path pathCowOff=new Path(baseDir,"123456789"); StoreFile.Writer writer=writeStoreFile(conf,cacheConf,pathCowOff,3); StoreFile hsf=new StoreFile(this.fs,writer.getPath(),conf,cacheConf,BloomType.NONE); LOG.debug(hsf.getPath().toString()); StoreFile.Reader reader=hsf.createReader(); reader.loadFileInfo(); StoreFileScanner scanner=reader.getStoreFileScanner(true,true); scanner.seek(KeyValue.LOWESTKEY); while (scanner.next() != null) ; assertEquals(startHit,cs.getHitCount()); assertEquals(startMiss + 3,cs.getMissCount()); assertEquals(startEvicted,cs.getEvictedCount()); startMiss+=3; scanner.close(); reader.close(cacheConf.shouldEvictOnClose()); conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,true); cacheConf=new CacheConfig(conf); Path pathCowOn=new Path(baseDir,"123456788"); writer=writeStoreFile(conf,cacheConf,pathCowOn,3); hsf=new StoreFile(this.fs,writer.getPath(),conf,cacheConf,BloomType.NONE); reader=hsf.createReader(); scanner=reader.getStoreFileScanner(true,true); scanner.seek(KeyValue.LOWESTKEY); while (scanner.next() != null) ; assertEquals(startHit + 3,cs.getHitCount()); assertEquals(startMiss,cs.getMissCount()); assertEquals(startEvicted,cs.getEvictedCount()); startHit+=3; scanner.close(); reader.close(cacheConf.shouldEvictOnClose()); hsf=new StoreFile(this.fs,pathCowOff,conf,cacheConf,BloomType.NONE); StoreFile.Reader readerOne=hsf.createReader(); readerOne.loadFileInfo(); StoreFileScanner scannerOne=readerOne.getStoreFileScanner(true,true); scannerOne.seek(KeyValue.LOWESTKEY); 
hsf=new StoreFile(this.fs,pathCowOn,conf,cacheConf,BloomType.NONE); StoreFile.Reader readerTwo=hsf.createReader(); readerTwo.loadFileInfo(); StoreFileScanner scannerTwo=readerTwo.getStoreFileScanner(true,true); scannerTwo.seek(KeyValue.LOWESTKEY); Cell kv1=null; Cell kv2=null; while ((kv1=scannerOne.next()) != null) { kv2=scannerTwo.next(); assertTrue(kv1.equals(kv2)); KeyValue keyv1=KeyValueUtil.ensureKeyValue(kv1); KeyValue keyv2=KeyValueUtil.ensureKeyValue(kv2); assertTrue(Bytes.compareTo(keyv1.getBuffer(),keyv1.getKeyOffset(),keyv1.getKeyLength(),keyv2.getBuffer(),keyv2.getKeyOffset(),keyv2.getKeyLength()) == 0); assertTrue(Bytes.compareTo(kv1.getValueArray(),kv1.getValueOffset(),kv1.getValueLength(),kv2.getValueArray(),kv2.getValueOffset(),kv2.getValueLength()) == 0); } assertNull(scannerTwo.next()); assertEquals(startHit + 6,cs.getHitCount()); assertEquals(startMiss,cs.getMissCount()); assertEquals(startEvicted,cs.getEvictedCount()); startHit+=6; scannerOne.close(); readerOne.close(cacheConf.shouldEvictOnClose()); scannerTwo.close(); readerTwo.close(cacheConf.shouldEvictOnClose()); conf.setBoolean("hbase.rs.evictblocksonclose",true); cacheConf=new CacheConfig(conf); hsf=new StoreFile(this.fs,pathCowOff,conf,cacheConf,BloomType.NONE); reader=hsf.createReader(); reader.close(cacheConf.shouldEvictOnClose()); assertEquals(startHit,cs.getHitCount()); assertEquals(startMiss,cs.getMissCount()); assertEquals(startEvicted + 3,cs.getEvictedCount()); startEvicted+=3; conf.setBoolean("hbase.rs.evictblocksonclose",false); cacheConf=new CacheConfig(conf); hsf=new StoreFile(this.fs,pathCowOn,conf,cacheConf,BloomType.NONE); reader=hsf.createReader(); reader.close(cacheConf.shouldEvictOnClose()); assertEquals(startHit,cs.getHitCount()); assertEquals(startMiss,cs.getMissCount()); assertEquals(startEvicted,cs.getEvictedCount()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Delete-family bloom filter accuracy: writes DeleteFamily markers for every
// even row (1000 of 2000), then probes all 2000 rows with
// passesDeleteFamilyBloomFilter(). A bloom filter may not produce false
// negatives, so every written row must pass; false positives are bounded by
// 2 * 2000 * err (err = 1%). Also checks getDeleteFamilyCnt() counts the
// 1000 markers. File is deleted after the reader is closed.
@Test public void testDeleteFamilyBloomFilter() throws Exception { FileSystem fs=FileSystem.getLocal(conf); conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,(float)0.01); conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED,true); float err=conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,0); Path f=new Path(ROOT_DIR,getName()); HFileContext meta=new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build(); StoreFile.Writer writer=new StoreFile.WriterBuilder(conf,cacheConf,this.fs).withFilePath(f).withMaxKeyCount(2000).withFileContext(meta).build(); long now=System.currentTimeMillis(); for (int i=0; i < 2000; i+=2) { String row=String.format(localFormatter,i); KeyValue kv=new KeyValue(row.getBytes(),"family".getBytes(),"col".getBytes(),now,KeyValue.Type.DeleteFamily,"value".getBytes()); writer.append(kv); } writer.close(); StoreFile.Reader reader=new StoreFile.Reader(fs,f,cacheConf,conf); reader.loadFileInfo(); reader.loadBloomfilter(); int falsePos=0; int falseNeg=0; for (int i=0; i < 2000; i++) { String row=String.format(localFormatter,i); byte[] rowKey=Bytes.toBytes(row); boolean exists=reader.passesDeleteFamilyBloomFilter(rowKey,0,rowKey.length); if (i % 2 == 0) { if (!exists) falseNeg++; } else { if (exists) falsePos++; } } assertEquals(1000,reader.getDeleteFamilyCnt()); reader.close(true); fs.delete(f,true); assertEquals("False negatives: " + falseNeg,0,falseNeg); int maxFalsePos=(int)(2 * 2000 * err); assertTrue("Too many false positives: " + falsePos + " (err="+ err+ ", expected no more than "+ maxFalsePos,falsePos <= maxFalsePos); }

InternalCallVerifier BooleanVerifier 
/**
 * An empty (fully mocked) store file must be excluded by shouldUseScanner()
 * when the scan restricts the column-family time range: with no data there
 * is nothing in [0,1), so the scanner reports it cannot be useful.
 */
@Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
  // Build a scanner over a mocked, empty reader.
  StoreFile.Reader mockReader = mock(StoreFile.Reader.class);
  Store mockStore = mock(Store.class);
  HColumnDescriptor mockFamilyDesc = mock(HColumnDescriptor.class);
  byte[] familyName = Bytes.toBytes("ty");
  when(mockFamilyDesc.getName()).thenReturn(familyName);
  when(mockStore.getFamily()).thenReturn(mockFamilyDesc);
  StoreFileScanner sfScanner =
      new StoreFileScanner(mockReader, mock(HFileScanner.class), false, false, 0);

  // Restrict the scan's family time range; the empty file can't satisfy it.
  Scan scan = new Scan();
  scan.setColumnFamilyTimeRange(familyName, 0, 1);
  assertFalse(sfScanner.shouldUseScanner(scan, mockStore, 0));
}

Class: org.apache.hadoop.hbase.regionserver.TestStoreFileInfo

InternalCallVerifier EqualityVerifier 
/**
 * Two StoreFileInfo instances built from distinct but identically-pathed
 * HFileLink objects must be equal and share the same hash code.
 */
@Test
public void testEqualsWithLink() throws IOException {
  Path originDir = new Path("/origin");
  Path tmpDir = new Path("/tmp");
  Path mobDir = new Path("/mob");
  Path archiveDir = new Path("/archive");

  // Two separate link objects pointing at the same four locations.
  HFileLink linkA = new HFileLink(new Path(originDir, "f1"), new Path(tmpDir, "f1"),
      new Path(mobDir, "f1"), new Path(archiveDir, "f1"));
  HFileLink linkB = new HFileLink(new Path(originDir, "f1"), new Path(tmpDir, "f1"),
      new Path(mobDir, "f1"), new Path(archiveDir, "f1"));

  StoreFileInfo infoA = new StoreFileInfo(TEST_UTIL.getConfiguration(),
      TEST_UTIL.getTestFileSystem(), null, linkA);
  StoreFileInfo infoB = new StoreFileInfo(TEST_UTIL.getConfiguration(),
      TEST_UTIL.getTestFileSystem(), null, linkB);

  // equals/hashCode contract: equal objects, equal hashes.
  assertEquals(infoA, infoB);
  assertEquals(infoA.hashCode(), infoB.hashCode());
}

Class: org.apache.hadoop.hbase.regionserver.TestStoreFileScannerWithTagCompression

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Reseek over a PREFIX-encoded file with compressed tags: writes the fixture
// via writeStoreFile(), reseeks to first-on-row "k2", advances three cells
// (landing on row "k5" per the fixture layout), and verifies both the row
// bytes and that the cell's single tag decodes back to value "tag3" —
// i.e. tag compression round-trips through the block encoder.
// NOTE(review): the reader is never closed (only the scanner is); harmless
// in a test but worth confirming against the project's resource conventions.
@Test public void testReseek() throws Exception { Path f=new Path(ROOT_DIR,"testReseek"); HFileContext meta=new HFileContextBuilder().withBlockSize(8 * 1024).withIncludesTags(true).withCompressTags(true).withDataBlockEncoding(DataBlockEncoding.PREFIX).build(); StoreFile.Writer writer=new StoreFile.WriterBuilder(conf,cacheConf,fs).withFilePath(f).withFileContext(meta).build(); writeStoreFile(writer); writer.close(); StoreFile.Reader reader=new StoreFile.Reader(fs,f,cacheConf,conf); StoreFileScanner s=reader.getStoreFileScanner(false,false); try { KeyValue k=KeyValueUtil.createFirstOnRow(Bytes.toBytes("k2")); s.reseek(k); Cell kv=s.next(); kv=s.next(); kv=s.next(); byte[] key5=Bytes.toBytes("k5"); assertTrue(Bytes.equals(key5,0,key5.length,kv.getRowArray(),kv.getRowOffset(),kv.getRowLength())); List tags=KeyValueUtil.ensureKeyValue(kv).getTags(); assertEquals(1,tags.size()); assertEquals("tag3",Bytes.toString(TagUtil.cloneValue(tags.get(0)))); } finally { s.close(); } }

Class: org.apache.hadoop.hbase.regionserver.TestStripeStoreEngine

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies forceSelect() replaces a normal compaction selection: a regular
// select() over three L0 files picks all three; forceSelect() with a request
// that excludes `sf` must shrink the selection to two files and drop `sf`.
// The stripe compactor is replaced with a mock so the final verify() can
// assert compact() was invoked exactly once with the forced request,
// the configured stripe count, and open start/end keys.
@Test public void testCompactionContextForceSelect() throws Exception { Configuration conf=HBaseConfiguration.create(); int targetCount=2; conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY,targetCount); conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY,2); conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY,TestStoreEngine.class.getName()); TestStoreEngine se=createEngine(conf); StripeCompactor mockCompactor=mock(StripeCompactor.class); se.setCompactorOverride(mockCompactor); when(mockCompactor.compact(any(CompactionRequest.class),anyInt(),anyLong(),any(byte[].class),any(byte[].class),any(byte[].class),any(byte[].class),any(ThroughputController.class),any(User.class))).thenReturn(new ArrayList()); StoreFile sf=createFile(); ArrayList compactUs=al(sf,createFile(),createFile()); se.getStoreFileManager().loadFiles(compactUs); CompactionContext compaction=se.createCompaction(); compaction.select(al(),false,false,false); assertEquals(3,compaction.getRequest().getFiles().size()); compactUs.remove(sf); CompactionRequest req=new CompactionRequest(compactUs); compaction.forceSelect(req); assertEquals(2,compaction.getRequest().getFiles().size()); assertFalse(compaction.getRequest().getFiles().contains(sf)); compaction.compact(NoLimitThroughputController.INSTANCE); verify(mockCompactor,times(1)).compact(compaction.getRequest(),targetCount,0L,StripeStoreFileManager.OPEN_KEY,StripeStoreFileManager.OPEN_KEY,null,null,NoLimitThroughputController.INSTANCE,null); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A store engine created from a configuration naming TestStoreEngine must
 * come back wired with a stripe compaction policy.
 */
@Test
public void testCreateBasedOnConfig() throws Exception {
  Configuration engineConf = HBaseConfiguration.create();
  engineConf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, TestStoreEngine.class.getName());
  StripeStoreEngine engine = createEngine(engineConf);
  assertTrue(engine.getCompactionPolicy() instanceof StripeCompactionPolicy);
}

Class: org.apache.hadoop.hbase.regionserver.TestStripeStoreFileManager

InternalCallVerifier EqualityVerifier 
// Simulates a flush landing while a compaction of the same key space is in
// flight. Initial stripes [open,C)/[C,open) are established; a second pair
// split at D goes to L0. Compacting the L0 pair away (empty result) empties
// L0; then a new flush file arrives before the stripe files are compacted
// into the D-split layout. After that compaction the flush file must remain
// as the single L0 file and still be visible to a get at KEY_C alongside the
// new [open,D) stripe — i.e. the concurrent flush is never lost.
@Test public void testCompactionAndFlushConflict() throws Exception { StripeStoreFileManager sfm=createManager(); assertEquals(0,sfm.getStripeCount()); StoreFile sf_i2c=createFile(OPEN_KEY,KEY_C), sf_c2i=createFile(KEY_C,OPEN_KEY); sfm.insertNewFiles(al(sf_i2c,sf_c2i)); assertEquals(2,sfm.getStripeCount()); StoreFile sf_i2d=createFile(OPEN_KEY,KEY_D), sf_d2i=createFile(KEY_D,OPEN_KEY); sfm.insertNewFiles(al(sf_i2d,sf_d2i)); assertEquals(2,sfm.getStripeCount()); assertEquals(2,sfm.getLevel0Files().size()); verifyGetAndScanScenario(sfm,KEY_C,KEY_C,sf_i2d,sf_d2i,sf_c2i); sfm.addCompactionResults(al(sf_i2d,sf_d2i),al()); sfm.removeCompactedFiles(al(sf_i2d,sf_d2i)); assertEquals(0,sfm.getLevel0Files().size()); StoreFile sf_i2c_2=createFile(OPEN_KEY,KEY_C); sfm.insertNewFiles(al(sf_i2c_2)); sfm.addCompactionResults(al(sf_i2c,sf_c2i),al(sf_i2d,sf_d2i)); sfm.removeCompactedFiles(al(sf_i2c,sf_c2i)); assertEquals(1,sfm.getLevel0Files().size()); verifyGetAndScanScenario(sfm,KEY_C,KEY_C,sf_i2d,sf_i2c_2); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises getCandidateFilesForRowKeyBefore / updateCandidateFilesForRowKeyBefore
// across L0-only, striped, and re-compacted layouts. Checks that candidate
// iterators (a) still have L0 candidates after consuming one, (b) include the
// correct stripe files per probe key once [open,B)/[B,open) stripes exist
// (stripe1 excluded for keys before B), and (c) after the second L0 file is
// compacted into a newer [open,B) stripe (seq 101), the newer stripe file is
// offered before the older one (seq 100) for the same key.
@Test public void testRowKeyBefore() throws Exception { StripeStoreFileManager manager=createManager(); StoreFile l0File=createFile(), l0File2=createFile(); manager.insertNewFiles(al(l0File)); manager.insertNewFiles(al(l0File2)); Iterator sfs=manager.getCandidateFilesForRowKeyBefore(KV_B); sfs.next(); sfs.remove(); sfs=manager.updateCandidateFilesForRowKeyBefore(sfs,KV_B,KV_A); assertTrue(sfs.hasNext()); MockStoreFile stripe0a=createFile(0,100,OPEN_KEY,KEY_B), stripe1=createFile(KEY_B,OPEN_KEY); manager.addCompactionResults(al(l0File),al(stripe0a,stripe1)); manager.removeCompactedFiles(al(l0File)); ArrayList sfsDump=dumpIterator(manager.getCandidateFilesForRowKeyBefore(KV_A)); assertEquals(2,sfsDump.size()); assertTrue(sfsDump.contains(stripe0a)); assertFalse(sfsDump.contains(stripe1)); sfsDump=dumpIterator(manager.getCandidateFilesForRowKeyBefore(KV_B)); assertEquals(3,sfsDump.size()); assertTrue(sfsDump.contains(stripe1)); sfsDump=dumpIterator(manager.getCandidateFilesForRowKeyBefore(KV_D)); assertEquals(3,sfsDump.size()); sfs=manager.getCandidateFilesForRowKeyBefore(KV_D); sfs.next(); sfs.remove(); sfs=manager.updateCandidateFilesForRowKeyBefore(sfs,KV_D,KV_C); assertEquals(stripe1,sfs.next()); assertFalse(sfs.hasNext()); StoreFile stripe0b=createFile(0,101,OPEN_KEY,KEY_B); manager.addCompactionResults(al(l0File2),al(stripe0b)); manager.removeCompactedFiles(al(l0File2)); sfs=manager.getCandidateFilesForRowKeyBefore(KV_A); assertEquals(stripe0b,sfs.next()); sfs.remove(); sfs=manager.updateCandidateFilesForRowKeyBefore(sfs,KV_A,KV_A); assertEquals(stripe0a,sfs.next()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Loading a shuffled mix of valid stripe files and malformed ones (null
// boundaries, inverted ranges, boundaries that don't align with any stripe,
// overlapping spans) must not fail: every malformed file is demoted to
// level 0 while the valid [open,B)/[B,C)/[C,open) stripes load normally,
// and no file is dropped (verifyAllFiles checks the full set survives).
@Test @SuppressWarnings("unchecked") public void testLoadFilesWithRecoverableBadFiles() throws Exception { ArrayList validStripeFiles=al(createFile(OPEN_KEY,KEY_B),createFile(KEY_B,KEY_C),createFile(KEY_C,OPEN_KEY),createFile(KEY_C,OPEN_KEY)); ArrayList filesToGoToL0=al(createFile(),createFile(null,KEY_A),createFile(KEY_D,null),createFile(KEY_D,KEY_A),createFile(keyAfter(KEY_A),KEY_C),createFile(OPEN_KEY,KEY_D),createFile(KEY_D,keyAfter(KEY_D))); ArrayList allFilesToGo=flattenLists(validStripeFiles,filesToGoToL0); Collections.shuffle(allFilesToGo); StripeStoreFileManager manager=createManager(allFilesToGo); List l0Files=manager.getLevel0Files(); assertEquals(filesToGoToL0.size(),l0Files.size()); for ( StoreFile sf : filesToGoToL0) { assertTrue(l0Files.contains(sf)); } verifyAllFiles(manager,allFilesToGo); }

InternalCallVerifier EqualityVerifier 
/**
 * clearFiles() must return every tracked file (two L0 inserts plus two
 * stripe files produced by a compaction result) and leave the manager
 * with no store files at all.
 */
@Test
public void testClearFiles() throws Exception {
  StripeStoreFileManager manager = createManager();
  // Two flush files into L0, then two stripe files via a compaction result.
  manager.insertNewFiles(al(createFile()));
  manager.insertNewFiles(al(createFile()));
  manager.addCompactionResults(al(),
      al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, OPEN_KEY)));
  assertEquals(4, manager.getStorefileCount());

  // Clearing hands back all four files and empties the manager.
  Collection removed = manager.clearFiles();
  assertEquals(4, removed.size());
  assertEquals(0, manager.getStorefileCount());
  assertEquals(0, manager.getStorefiles().size());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Stripe layouts with a gap are not trusted: files [open,B) and [C,open)
 * leave [B,C) uncovered, so both are demoted to level 0. A single
 * fully-open file, by contrast, is a valid one-stripe layout.
 */
@Test
public void testLoadFilesWithGaps() throws Exception {
  // Gap between the two ranges -> both files land in L0.
  StripeStoreFileManager sfm =
      createManager(al(createFile(OPEN_KEY, KEY_B), createFile(KEY_C, OPEN_KEY)));
  assertEquals(2, sfm.getLevel0Files().size());

  // One open-ended file covers everything -> accepted as a stripe, not L0.
  sfm = createManager(al(createFile(OPEN_KEY, OPEN_KEY)));
  assertEquals(0, sfm.getLevel0Files().size());
  assertEquals(1, sfm.getStorefileCount());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// getSplitPoint() edge cases: an empty manager yields null; with L0 files
// the split point comes from the largest file's own splitPoint (sf5, size 5,
// beats the size-1 file); adding a smaller open-keyed stripe file (size 2)
// does not change that; adding a larger one (sf6, size 6) with its own
// splitPoint takes over. I.e. the split point always tracks the biggest
// file that can supply one.
@Test public void testGetSplitPointEdgeCases() throws Exception { StripeStoreFileManager manager=createManager(); assertNull(manager.getSplitPoint()); MockStoreFile sf5=createFile(5,0); sf5.splitPoint=new byte[1]; manager.insertNewFiles(al(sf5)); manager.insertNewFiles(al(createFile(1,0))); assertEquals(sf5.splitPoint,manager.getSplitPoint()); manager.addCompactionResults(al(),al(createFile(2,0,OPEN_KEY,OPEN_KEY))); assertEquals(sf5.splitPoint,manager.getSplitPoint()); MockStoreFile sf6=createFile(6,0,OPEN_KEY,OPEN_KEY); sf6.splitPoint=new byte[1]; manager.addCompactionResults(al(),al(sf6)); assertEquals(sf6.splitPoint,manager.getSplitPoint()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// A freshly inserted flush file goes to level 0 and must be returned for
// gets on any key. After a compaction establishes [open,B)/[B,open) stripes,
// the L0 file must STILL be included in get candidates for keys in either
// stripe (KEY_A and KEY_C), since L0 files overlap the whole key space.
@Test public void testInsertFilesIntoL0() throws Exception { StripeStoreFileManager manager=createManager(); MockStoreFile sf=createFile(); manager.insertNewFiles(al(sf)); assertEquals(1,manager.getStorefileCount()); Collection filesForGet=manager.getFilesForScanOrGet(true,KEY_A,KEY_A); assertEquals(1,filesForGet.size()); assertTrue(filesForGet.contains(sf)); manager.addCompactionResults(al(),al(createFile(OPEN_KEY,KEY_B),createFile(KEY_B,OPEN_KEY))); assertTrue(manager.getFilesForScanOrGet(true,KEY_A,KEY_A).contains(sf)); assertTrue(manager.getFilesForScanOrGet(true,KEY_C,KEY_C).contains(sf)); }

Class: org.apache.hadoop.hbase.regionserver.TestTags

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that cell tags set via the "visibility" mutation attribute survive
 * Increment and Append operations, and that tags carried by the existing cell
 * and by the new mutation are merged (row1/row3), while a mutation with a tag
 * applied to an untagged cell leaves just the new tag (row2/row4).
 *
 * NOTE(review): relies on the TestCoprocessorForTags static fields
 * (checkTagPresence / tags) being populated by a coprocessor registered
 * elsewhere in this test class — confirm the coprocessor is installed before
 * these reads.
 */
@Test public void testTagsWithAppendAndIncrement() throws Exception { TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); byte[] f=Bytes.toBytes("f"); byte[] q=Bytes.toBytes("q"); byte[] row1=Bytes.toBytes("r1"); byte[] row2=Bytes.toBytes("r2"); HTableDescriptor desc=new HTableDescriptor(tableName); HColumnDescriptor colDesc=new HColumnDescriptor(f); desc.addFamily(colDesc); TEST_UTIL.getHBaseAdmin().createTable(desc); Table table=null; try { table=TEST_UTIL.getConnection().getTable(tableName); Put put=new Put(row1); byte[] v=Bytes.toBytes(2L); put.addColumn(f,q,v); put.setAttribute("visibility",Bytes.toBytes("tag1")); table.put(put); Increment increment=new Increment(row1); increment.addColumn(f,q,1L); table.increment(increment); TestCoprocessorForTags.checkTagPresence=true; ResultScanner scanner=table.getScanner(new Scan()); Result result=scanner.next(); KeyValue kv=KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f,q)); List tags=TestCoprocessorForTags.tags; assertEquals(3L,Bytes.toLong(kv.getValueArray(),kv.getValueOffset(),kv.getValueLength())); assertEquals(1,tags.size()); assertEquals("tag1",Bytes.toString(TagUtil.cloneValue(tags.get(0)))); TestCoprocessorForTags.checkTagPresence=false; TestCoprocessorForTags.tags=null; increment=new Increment(row1); increment.add(new KeyValue(row1,f,q,1234L,v)); increment.setAttribute("visibility",Bytes.toBytes("tag2")); table.increment(increment); TestCoprocessorForTags.checkTagPresence=true; scanner=table.getScanner(new Scan()); result=scanner.next(); kv=KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f,q)); tags=TestCoprocessorForTags.tags; assertEquals(5L,Bytes.toLong(kv.getValueArray(),kv.getValueOffset(),kv.getValueLength())); assertEquals(2,tags.size()); List tagValues=new ArrayList(); for ( Tag tag : tags) { tagValues.add(Bytes.toString(TagUtil.cloneValue(tag))); } assertTrue(tagValues.contains("tag1")); assertTrue(tagValues.contains("tag2")); TestCoprocessorForTags.checkTagPresence=false; 
// row2: the base cell was written without a tag, so only the increment's
// "tag2" is expected after the operation; then the same pattern for Append
// against row3 (tagged base cell) below.
TestCoprocessorForTags.tags=null; put=new Put(row2); v=Bytes.toBytes(2L); put.addColumn(f,q,v); table.put(put); increment=new Increment(row2); increment.add(new KeyValue(row2,f,q,1234L,v)); increment.setAttribute("visibility",Bytes.toBytes("tag2")); table.increment(increment); Scan scan=new Scan(); scan.setStartRow(row2); TestCoprocessorForTags.checkTagPresence=true; scanner=table.getScanner(scan); result=scanner.next(); kv=KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f,q)); tags=TestCoprocessorForTags.tags; assertEquals(4L,Bytes.toLong(kv.getValueArray(),kv.getValueOffset(),kv.getValueLength())); assertEquals(1,tags.size()); assertEquals("tag2",Bytes.toString(TagUtil.cloneValue(tags.get(0)))); TestCoprocessorForTags.checkTagPresence=false; TestCoprocessorForTags.tags=null; byte[] row3=Bytes.toBytes("r3"); put=new Put(row3); put.addColumn(f,q,Bytes.toBytes("a")); put.setAttribute("visibility",Bytes.toBytes("tag1")); table.put(put); Append append=new Append(row3); append.add(f,q,Bytes.toBytes("b")); table.append(append); scan=new Scan(); scan.setStartRow(row3); TestCoprocessorForTags.checkTagPresence=true; scanner=table.getScanner(scan); result=scanner.next(); kv=KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f,q)); tags=TestCoprocessorForTags.tags; assertEquals(1,tags.size()); assertEquals("tag1",Bytes.toString(TagUtil.cloneValue(tags.get(0)))); TestCoprocessorForTags.checkTagPresence=false; TestCoprocessorForTags.tags=null; append=new Append(row3); append.add(new KeyValue(row3,f,q,1234L,v)); append.setAttribute("visibility",Bytes.toBytes("tag2")); table.append(append); TestCoprocessorForTags.checkTagPresence=true; scanner=table.getScanner(scan); result=scanner.next(); kv=KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f,q)); tags=TestCoprocessorForTags.tags; assertEquals(2,tags.size()); tagValues.clear(); for ( Tag tag : tags) { tagValues.add(Bytes.toString(TagUtil.cloneValue(tag))); } assertTrue(tagValues.contains("tag1")); 
assertTrue(tagValues.contains("tag2")); TestCoprocessorForTags.checkTagPresence=false; TestCoprocessorForTags.tags=null; byte[] row4=Bytes.toBytes("r4"); put=new Put(row4); put.addColumn(f,q,Bytes.toBytes("a")); table.put(put); append=new Append(row4); append.add(new KeyValue(row4,f,q,1234L,v)); append.setAttribute("visibility",Bytes.toBytes("tag2")); table.append(append); scan=new Scan(); scan.setStartRow(row4); TestCoprocessorForTags.checkTagPresence=true; scanner=table.getScanner(scan); result=scanner.next(); kv=KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f,q)); tags=TestCoprocessorForTags.tags; assertEquals(1,tags.size()); assertEquals("tag2",Bytes.toString(TagUtil.cloneValue(tags.get(0)))); } finally { TestCoprocessorForTags.checkTagPresence=false; TestCoprocessorForTags.tags=null; if (table != null) { table.close(); } } }

InternalCallVerifier BooleanVerifier 
/**
 * Writes three rows (no tags) into a table with PREFIX_TREE block encoding,
 * flushing after each, then scans after the flushes and again after a major
 * compaction, asserting that each KeyValue carries no trailing tag bytes:
 * valueOffset + valueLength must equal the total KV length.
 *
 * NOTE(review): the Thread.sleep(1000) after each flush looks like a timing
 * cushion rather than a required synchronization point — confirm.
 */
@Test public void testFlushAndCompactionWithoutTags() throws Exception { Table table=null; try { TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); byte[] fam=Bytes.toBytes("info"); byte[] row=Bytes.toBytes("rowa"); byte[] qual=Bytes.toBytes("qual"); byte[] row1=Bytes.toBytes("rowb"); byte[] row2=Bytes.toBytes("rowc"); HTableDescriptor desc=new HTableDescriptor(tableName); HColumnDescriptor colDesc=new HColumnDescriptor(fam); colDesc.setBlockCacheEnabled(true); colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE); desc.addFamily(colDesc); Admin admin=TEST_UTIL.getHBaseAdmin(); admin.createTable(desc); table=TEST_UTIL.getConnection().getTable(tableName); Put put=new Put(row); byte[] value=Bytes.toBytes("value"); put.addColumn(fam,qual,HConstants.LATEST_TIMESTAMP,value); table.put(put); admin.flush(tableName); Thread.sleep(1000); Put put1=new Put(row1); byte[] value1=Bytes.toBytes("1000dfsdf"); put1.addColumn(fam,qual,HConstants.LATEST_TIMESTAMP,value1); table.put(put1); admin.flush(tableName); Thread.sleep(1000); Put put2=new Put(row2); byte[] value2=Bytes.toBytes("1000dfsdf"); put2.addColumn(fam,qual,HConstants.LATEST_TIMESTAMP,value2); table.put(put2); admin.flush(tableName); Thread.sleep(1000); Scan s=new Scan(row); ResultScanner scanner=table.getScanner(s); try { Result[] next=scanner.next(3); for ( Result result : next) { CellScanner cellScanner=result.cellScanner(); cellScanner.advance(); KeyValue current=(KeyValue)cellScanner.current(); assertTrue(current.getValueOffset() + current.getValueLength() == current.getLength()); } } finally { if (scanner != null) scanner.close(); } admin.compact(tableName); while (admin.getCompactionState(tableName) != CompactionState.NONE) { Thread.sleep(10); } s=new Scan(row); scanner=table.getScanner(s); try { Result[] next=scanner.next(3); for ( Result result : next) { CellScanner cellScanner=result.cellScanner(); cellScanner.advance(); KeyValue current=(KeyValue)cellScanner.current(); 
// Same no-trailing-tag-bytes check after the major compaction completed.
assertTrue(current.getValueOffset() + current.getValueLength() == current.getLength()); } } finally { if (scanner != null) { scanner.close(); } } } finally { if (table != null) { table.close(); } } }

Class: org.apache.hadoop.hbase.regionserver.TestTimeRangeTracker

InternalCallVerifier BooleanVerifier 
/** A tracker spanning [0, 2] must include a TimeRange starting at 1. */
@Test
public void testSimpleInRange() {
  TimeRangeTracker tracker = new TimeRangeTracker();
  tracker.includeTimestamp(0);
  tracker.includeTimestamp(2);
  assertTrue(tracker.includesTimeRange(new TimeRange(1)));
}

InternalCallVerifier BooleanVerifier 
/**
 * Run a bunch of threads against a single TimeRangeTracker and ensure we
 * arrive at the right range. Ten threads each include {@code calls}
 * consecutive timestamps starting at an offset of the thread index; even
 * threads count up, odd threads count down (inclusive of the upper bound).
 * Max is threadCount * calls (upper bound of the highest odd thread) and
 * min is 0 (from thread 0).
 * @throws InterruptedException
 */
@Test
public void testArriveAtRightAnswer() throws InterruptedException {
  final TimeRangeTracker trr = new TimeRangeTracker();
  final int threadCount = 10;
  final int calls = 1000 * 1000;
  Thread[] threads = new Thread[threadCount];
  for (int i = 0; i < threads.length; i++) {
    Thread t = new Thread("" + i) {
      @Override
      public void run() {
        int offset = Integer.parseInt(getName());
        boolean even = offset % 2 == 0;
        int base = offset * calls;
        if (even) {
          // BUGFIX: the upper bound used to be plain `calls` instead of
          // `base + calls`, so every even thread with offset >= 2 never
          // executed its loop at all and contributed nothing.
          for (int i = base; i < base + calls; i++) {
            trr.includeTimestamp(i);
          }
        } else {
          for (int i = base + calls; i >= base; i--) {
            trr.includeTimestamp(i);
          }
        }
      }
    };
    t.start();
    threads[i] = t;
  }
  for (int i = 0; i < threads.length; i++) {
    threads[i].join();
  }
  // Highest included timestamp: base + calls from the odd thread with the
  // largest offset (threadCount - 1), i.e. calls * threadCount.
  assertTrue(trr.getMaximumTimestamp() == calls * threadCount);
  assertTrue(trr.getMinimumTimestamp() == 0);
}

InternalCallVerifier BooleanVerifier 
/**
 * Timestamps arriving in strictly decreasing order must still establish both
 * a real minimum (not the initial sentinel) and a real maximum (not -1).
 */
@Test
public void testAlwaysDecrementingSetsMaximum() {
  TimeRangeTracker tracker = new TimeRangeTracker();
  for (long ts = 3; ts >= 1; ts--) {
    tracker.includeTimestamp(ts);
  }
  assertTrue(tracker.getMinimumTimestamp() != TimeRangeTracker.INITIAL_MINIMUM_TIMESTAMP);
  assertTrue(tracker.getMaximumTimestamp() != -1);
}

Class: org.apache.hadoop.hbase.regionserver.TestWALLockup

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Reproduce the lock-up that happens when we get an inopportune sync during
 * setup for the zigzag-latch wait. See HBASE-14317. If the code under test is
 * broken, this test times out because it is locked up.
 *
 * Sets up mocks for Server and RegionServerServices, plus a dodgy WAL
 * (DodgyFSLog) whose writer throws from sync()/append() once
 * {@code throwException} is flipped; a latch coordinates the log roller and
 * the flusher thread so the problematic interleaving is forced.
 *
 * NOTE(review): `assertTrue(originalWAL != dodgyWAL.getCurrentFileName())` is
 * a reference (not equals) comparison of Path objects — confirm it actually
 * proves the WAL was rolled. Also, the local `CellScanner CellScanner` shadows
 * the type name; consider renaming in a follow-up.
 */
@Test(timeout=20000) public void testLockupWhenSyncInMiddleOfZigZagSetup() throws IOException { class DodgyFSLog extends FSHLog { volatile boolean throwException=false; CountDownLatch latch=new CountDownLatch(1); public DodgyFSLog( FileSystem fs, Path root, String logDir, Configuration conf) throws IOException { super(fs,root,logDir,conf); } @Override protected void afterCreatingZigZagLatch(){ if (throwException) { try { LOG.info("LATCHED"); if (!this.latch.await(5,TimeUnit.SECONDS)) { LOG.warn("GIVE UP! Failed waiting on latch...Test is ABORTED!"); } } catch ( InterruptedException e) { e.printStackTrace(); } } } @Override protected void beforeWaitOnSafePoint(){ if (throwException) { LOG.info("COUNTDOWN"); while (this.latch.getCount() <= 0) Threads.sleep(1); this.latch.countDown(); } } @Override protected Writer createWriterInstance( Path path) throws IOException { final Writer w=super.createWriterInstance(path); return new Writer(){ @Override public void close() throws IOException { w.close(); } @Override public void sync() throws IOException { if (throwException) { throw new IOException("FAKE! Failed to replace a bad datanode...SYNC"); } w.sync(); } @Override public void append( Entry entry) throws IOException { if (throwException) { throw new IOException("FAKE! 
Failed to replace a bad datanode...APPEND"); } w.append(entry); } @Override public long getLength() throws IOException { return w.getLength(); } } ; } } Server server=Mockito.mock(Server.class); Mockito.when(server.getConfiguration()).thenReturn(CONF); Mockito.when(server.isStopped()).thenReturn(false); Mockito.when(server.isAborted()).thenReturn(false); RegionServerServices services=Mockito.mock(RegionServerServices.class); FileSystem fs=FileSystem.get(CONF); Path rootDir=new Path(dir + getName()); DodgyFSLog dodgyWAL=new DodgyFSLog(fs,rootDir,getName(),CONF); Path originalWAL=dodgyWAL.getCurrentFileName(); LogRoller logRoller=new LogRoller(server,services); logRoller.addWAL(dodgyWAL); logRoller.start(); HTableDescriptor htd=new HTableDescriptor(TableName.META_TABLE_NAME); final HRegion region=initHRegion(tableName,null,null,dodgyWAL); byte[] bytes=Bytes.toBytes(getName()); try { Put put=new Put(bytes); put.addColumn(COLUMN_FAMILY_BYTES,Bytes.toBytes("1"),bytes); WALKey key=new WALKey(region.getRegionInfo().getEncodedNameAsBytes(),htd.getTableName()); WALEdit edit=new WALEdit(); CellScanner CellScanner=put.cellScanner(); assertTrue(CellScanner.advance()); edit.add(CellScanner.current()); for (int i=0; i < 1000; i++) { region.put(put); } LOG.info("SET throwing of exception on append"); dodgyWAL.throwException=true; dodgyWAL.append(htd,region.getRegionInfo(),key,edit,true); boolean exception=false; try { dodgyWAL.sync(); } catch ( Exception e) { exception=true; } assertTrue("Did not get sync exception",exception); Thread t=new Thread("Flusher"){ public void run(){ try { if (region.getMemstoreSize() <= 0) { throw new IOException("memstore size=" + region.getMemstoreSize()); } region.flush(false); } catch ( IOException e) { LOG.info("In flush",e); } LOG.info("Exiting"); } } ; t.setDaemon(true); t.start(); while (dodgyWAL.latch.getCount() > 0) Threads.sleep(1); assertTrue(originalWAL != dodgyWAL.getCurrentFileName()); dodgyWAL.throwException=false; try { 
region.put(put); } catch ( Exception e) { LOG.info("In the put",e); } } finally { Mockito.when(server.isStopped()).thenReturn(true); if (logRoller != null) logRoller.interrupt(); try { if (region != null) region.close(); if (dodgyWAL != null) dodgyWAL.close(); } catch ( Exception e) { LOG.info("On way out",e); } } }


Class: org.apache.hadoop.hbase.regionserver.compactions.TestCompactedHFilesDischarger

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Scanners are opened AFTER the compaction, so they reference only the new
 * (compacted-result) file: the three compacted-away files have refcount 0 and
 * the discharger chore can archive them immediately.
 */
@Test
public void testCleanerWithParallelScannersAfterCompaction() throws Exception {
  CompactedHFilesDischarger cleaner =
      new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
  // Three put-batches with a flush after each -> three store files.
  for (int i = 1; i < 10; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  region.flush(true);
  for (int i = 11; i < 20; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  region.flush(true);
  for (int i = 21; i < 30; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    p.addColumn(fam, qual1, val);
    region.put(p);
  }
  region.flush(true);
  Store store = region.getStore(fam);
  assertEquals(3, store.getStorefilesCount());
  Collection storefiles = store.getStorefiles();
  Collection compactedfiles =
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  for (StoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  // Compact first, then open the scanners: they only pin the new file.
  region.compact(true);
  startScannerThreads();
  storefiles = store.getStorefiles();
  int usedReaderCount = 0;
  int unusedReaderCount = 0;
  for (StoreFile file : storefiles) {
    if (file.getRefCount() == 3) {
      usedReaderCount++;
    }
  }
  compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  for (StoreFile file : compactedfiles) {
    // BUGFIX: the message used to say "Refcount should be 3" while the
    // assertion expects 0 — the compacted files are not referenced here.
    assertEquals("Refcount should be 0", 0, file.getRefCount());
    unusedReaderCount++;
  }
  assertEquals("unused reader count should be 3", 3, unusedReaderCount);
  assertEquals("used reader count should be 1", 1, usedReaderCount);
  // The chore can discharge the compacted files right away.
  cleaner.chore();
  countDown();
  assertEquals(1, store.getStorefilesCount());
  storefiles = store.getStorefiles();
  for (StoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  assertTrue(compactedfiles.size() == 0);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Scanners are opened BEFORE the compaction, so the three compacted-away
 * files still carry refcount 3 and the discharger chore must not remove
 * them until those scanners complete; a second scanner round afterwards
 * pins only the single compacted-result file (refcount 3 on it, 0 on the
 * already-tracked compacted files), after which the chore clears everything.
 */
@Test public void testCleanerWithParallelScanners() throws Exception { CompactedHFilesDischarger cleaner=new CompactedHFilesDischarger(1000,(Stoppable)null,rss,false); for (int i=1; i < 10; i++) { Put p=new Put(Bytes.toBytes("row" + i)); p.addColumn(fam,qual1,val); region.put(p); } region.flush(true); for (int i=11; i < 20; i++) { Put p=new Put(Bytes.toBytes("row" + i)); p.addColumn(fam,qual1,val); region.put(p); } region.flush(true); for (int i=21; i < 30; i++) { Put p=new Put(Bytes.toBytes("row" + i)); p.addColumn(fam,qual1,val); region.put(p); } region.flush(true); Store store=region.getStore(fam); assertEquals(3,store.getStorefilesCount()); Collection storefiles=store.getStorefiles(); Collection compactedfiles=((HStore)store).getStoreEngine().getStoreFileManager().getCompactedfiles(); for ( StoreFile file : storefiles) { assertFalse(file.isCompactedAway()); } startScannerThreads(); region.compact(true); storefiles=store.getStorefiles(); int usedReaderCount=0; int unusedReaderCount=0; for ( StoreFile file : storefiles) { if (file.getRefCount() == 0) { unusedReaderCount++; } } compactedfiles=((HStore)store).getStoreEngine().getStoreFileManager().getCompactedfiles(); for ( StoreFile file : compactedfiles) { assertEquals("Refcount should be 3",3,file.getRefCount()); usedReaderCount++; } assertEquals("unused reader count should be 1",1,unusedReaderCount); assertEquals("used reader count should be 3",3,usedReaderCount); cleaner.chore(); countDown(); assertEquals(1,store.getStorefilesCount()); assertEquals(3,((HStore)store).getStoreEngine().getStoreFileManager().getCompactedfiles().size()); while (scanCompletedCounter.get() != 3) { Thread.sleep(100); } latch=new CountDownLatch(3); scanCompletedCounter.set(0); counter.set(0); startScannerThreads(); storefiles=store.getStorefiles(); usedReaderCount=0; unusedReaderCount=0; for ( StoreFile file : storefiles) { if (file.getRefCount() == 3) { usedReaderCount++; } } 
// Second scanner round: the compacted files are no longer referenced, so the
// chore can finally discharge them.
compactedfiles=((HStore)store).getStoreEngine().getStoreFileManager().getCompactedfiles(); for ( StoreFile file : compactedfiles) { assertEquals("Refcount should be 0",0,file.getRefCount()); unusedReaderCount++; } assertEquals("unused reader count should be 3",3,unusedReaderCount); assertEquals("used reader count should be 1",1,usedReaderCount); countDown(); while (scanCompletedCounter.get() != 3) { Thread.sleep(100); } cleaner.chore(); assertEquals(1,store.getStorefilesCount()); storefiles=store.getStorefiles(); for ( StoreFile file : storefiles) { assertFalse(file.isCompactedAway()); } compactedfiles=((HStore)store).getStoreEngine().getStoreFileManager().getCompactedfiles(); assertTrue(compactedfiles.size() == 0); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * The discharger chore is a no-op while nothing has been compacted; after a
 * major compaction (with no open scanners) it must clear all three compacted
 * files, leaving just the single compaction-result file.
 */
@Test
public void testCompactedHFilesCleaner() throws Exception {
  CompactedHFilesDischarger cleaner =
      new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
  // Three put-batches of nine rows each, flushed into three store files.
  int[] startRows = { 1, 11, 21 };
  for (int start : startRows) {
    for (int i = start; i < start + 9; i++) {
      Put p = new Put(Bytes.toBytes("row" + i));
      p.addColumn(fam, qual1, val);
      region.put(p);
    }
    region.flush(true);
  }
  Store store = region.getStore(fam);
  assertEquals(3, store.getStorefilesCount());
  Collection storefiles = store.getStorefiles();
  Collection compactedfiles =
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  for (StoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  // Chore before any compaction: nothing changes.
  cleaner.chore();
  storefiles = store.getStorefiles();
  for (StoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  region.compact(true);
  assertEquals(1, store.getStorefilesCount());
  assertEquals(3,
      ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles().size());
  // Chore after compaction: the three compacted files are discharged.
  cleaner.chore();
  assertEquals(1, store.getStorefilesCount());
  storefiles = store.getStorefiles();
  for (StoreFile file : storefiles) {
    assertFalse(file.isCompactedAway());
  }
  compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
  assertTrue(compactedfiles.isEmpty());
}

Class: org.apache.hadoop.hbase.regionserver.compactions.TestFIFOCompactionPolicy

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A major compaction under the FIFO policy must purge all expired store
 * files, leaving the store with a single file.
 */
@Test
public void testPurgeExpiredFiles() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  TEST_UTIL.startMiniCluster(1);
  try {
    Store store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    TEST_UTIL.getHBaseAdmin().majorCompact(tableName);
    // Poll until the compaction collapses the store down to one file.
    while (store.getStorefilesCount() > 1) {
      Thread.sleep(100);
    }
    assertTrue(store.getStorefilesCount() == 1);
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}

Class: org.apache.hadoop.hbase.regionserver.compactions.TestOffPeakHours

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** The current hour lies inside the configured window -> off-peak. */
@Test
public void testSetPeakHourToTargetTime() {
  conf.setLong(CompactionConfiguration.HBASE_HSTORE_OFFPEAK_START_HOUR, hourMinusOne);
  conf.setLong(CompactionConfiguration.HBASE_HSTORE_OFFPEAK_END_HOUR, hourPlusOne);
  OffPeakHours offPeak = OffPeakHours.getInstance(conf);
  assertTrue(offPeak.isOffPeakHour(hourOfDay));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** The configured window ends before the current hour -> not off-peak. */
@Test
public void testSetPeakHourOutsideCurrentSelection() {
  conf.setLong(CompactionConfiguration.HBASE_HSTORE_OFFPEAK_START_HOUR, hourMinusTwo);
  conf.setLong(CompactionConfiguration.HBASE_HSTORE_OFFPEAK_END_HOUR, hourMinusOne);
  OffPeakHours offPeak = OffPeakHours.getInstance(conf);
  assertFalse(offPeak.isOffPeakHour(hourOfDay));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/** With no off-peak hours configured, no hour is considered off-peak. */
@Test
public void testWithoutSettings() {
  Configuration c = testUtil.getConfiguration();
  OffPeakHours offPeak = OffPeakHours.getInstance(c);
  assertFalse(offPeak.isOffPeakHour(hourOfDay));
}

Class: org.apache.hadoop.hbase.regionserver.compactions.TestStripeCompactionPolicy

InternalCallVerifier NullVerifier 
/**
 * Split-off-stripe selection: with the default ratio no compaction is
 * selected for an undersized stripe; with a huge ratio, whole-stripe
 * compactions targeting splitTargetSize are expected for the stripes that
 * cross the split size; a policy configured with a larger split size falls
 * back to single-stripe compaction instead.
 *
 * NOTE(review): the concrete expectations are encoded inside the
 * verifyWholeStripesCompaction / verifySingleStripeCompaction helpers
 * defined elsewhere in this class.
 */
@Test public void testSplitOffStripe() throws Exception { Configuration conf=HBaseConfiguration.create(); conf.unset("hbase.hstore.compaction.min.size"); conf.setInt(StripeStoreConfig.MIN_FILES_KEY,2); Long[] toSplit=new Long[]{defaultSplitSize - 2,1L,1L}; Long[] noSplit=new Long[]{defaultSplitSize - 2,1L}; long splitTargetSize=(long)(defaultSplitSize / defaultSplitCount); StripeCompactionPolicy.StripeInformationProvider si=createStripesWithSizes(0,0,new Long[]{defaultSplitSize - 2,2L}); assertNull(createPolicy(conf).selectCompaction(si,al(),false)); conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY,500f); StripeCompactionPolicy policy=createPolicy(conf); verifyWholeStripesCompaction(policy,si,0,0,null,2,splitTargetSize); si=createStripesWithSizes(0,0,noSplit,noSplit,toSplit); verifyWholeStripesCompaction(policy,si,2,2,null,2,splitTargetSize); si=createStripesWithSizes(0,0,noSplit,toSplit,noSplit); verifyWholeStripesCompaction(policy,si,1,1,null,2,splitTargetSize); StripeCompactionPolicy specPolicy=createPolicy(conf,defaultSplitSize + 1,defaultSplitCount,defaultInitialCount,false); verifySingleStripeCompaction(specPolicy,si,1,null); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Single-stripe selection with MIN_FILES=3 and MAX_FILES=4: verifies the
 * needs-compaction predicate, that the biggest eligible stripe is chosen,
 * and that a 5-file stripe is trimmed to a 4-file sub-selection
 * (subList(1, 5)) to respect the MAX_FILES cap.
 *
 * NOTE(review): the anonymous StripeCompactionPolicy subclass short-circuits
 * every other compaction type so that only selectSingleStripeCompaction /
 * needsSingleStripeCompaction are exercised.
 */
@Test public void testSingleStripeCompaction() throws Exception { Configuration conf=HBaseConfiguration.create(); conf.unset("hbase.hstore.compaction.min.size"); conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY,1.0F); conf.setInt(StripeStoreConfig.MIN_FILES_KEY,3); conf.setInt(StripeStoreConfig.MAX_FILES_KEY,4); conf.setLong(StripeStoreConfig.SIZE_TO_SPLIT_KEY,1000); StoreConfigInformation sci=mock(StoreConfigInformation.class); StripeStoreConfig ssc=new StripeStoreConfig(conf,sci); StripeCompactionPolicy policy=new StripeCompactionPolicy(conf,sci,ssc){ @Override public StripeCompactionRequest selectCompaction( StripeInformationProvider si, List filesCompacting, boolean isOffpeak) throws IOException { if (!filesCompacting.isEmpty()) return null; return selectSingleStripeCompaction(si,false,false,isOffpeak); } @Override public boolean needsCompactions( StripeInformationProvider si, List filesCompacting){ if (!filesCompacting.isEmpty()) return false; return needsSingleStripeCompaction(si); } } ; StripeInformationProvider si=createStripesWithSizes(0,0,new Long[]{2L},new Long[]{3L,3L},new Long[]{5L,1L}); verifyNoCompaction(policy,si); si=createStripesWithSizes(0,0,new Long[]{2L},new Long[]{3L,3L},new Long[]{5L,1L,1L}); assertNull(policy.selectCompaction(si,al(),false)); assertTrue(policy.needsCompactions(si,al())); si=createStripesWithSizes(0,0,new Long[]{2L},new Long[]{3L,3L},new Long[]{5L,4L,3L}); verifySingleStripeCompaction(policy,si,2,null); si=createStripesWithSizes(0,0,new Long[]{3L,2L,2L},new Long[]{2L,2L,1L},new Long[]{3L,2L,2L,1L}); verifySingleStripeCompaction(policy,si,2,null); si=createStripesWithSizes(0,0,new Long[]{5L},new Long[]{3L,2L,2L,1L},new Long[]{3L,2L,2L}); verifySingleStripeCompaction(policy,si,1,null); si=createStripesWithSizes(0,0,new Long[]{3L,3L,3L},new Long[]{3L,1L,2L},new Long[]{3L,2L,2L}); verifySingleStripeCompaction(policy,si,1,null); si=createStripesWithSizes(0,0,new Long[]{5L},new Long[]{5L,4L,4L,4L,4L}); List 
sfs=si.getStripes().get(1).subList(1,5); verifyCompaction(policy,si,sfs,null,1,null,si.getStartRow(1),si.getEndRow(1),true); si=createStripesWithSizes(0,0,new Long[]{5L},new Long[]{50L,4L,4L,4L,4L}); sfs=si.getStripes().get(1).subList(1,5); verifyCompaction(policy,si,sfs,null,1,null,si.getStartRow(1),si.getEndRow(1),true); }

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * Expired-file merging under a manually injected clock: stripes whose files
 * are all past the default TTL are selected for whole-stripe compaction (the
 * stripe-index range is passed to verifyWholeStripesCompaction); when no file
 * has expired, nothing is selected; a stripe holding a mix of live and
 * expired files blocks the merge from extending over it.
 *
 * NOTE(review): `expired`/`notExpired` deliberately contain the same mock
 * twice — the policy only reads the reader's max timestamp, so duplicates
 * are fine here.
 */
@SuppressWarnings("unchecked") @Test public void testMergeExpiredFiles() throws Exception { ManualEnvironmentEdge edge=new ManualEnvironmentEdge(); long now=defaultTtl + 2; edge.setValue(now); EnvironmentEdgeManager.injectEdge(edge); try { StoreFile expiredFile=createFile(), notExpiredFile=createFile(); when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1); when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1); List expired=Lists.newArrayList(expiredFile,expiredFile); List notExpired=Lists.newArrayList(notExpiredFile,notExpiredFile); List mixed=Lists.newArrayList(expiredFile,notExpiredFile); StripeCompactionPolicy policy=createPolicy(HBaseConfiguration.create(),defaultSplitSize,defaultSplitCount,defaultInitialCount,true); StripeCompactionPolicy.StripeInformationProvider si=createStripesWithFiles(expired,expired,expired); verifyWholeStripesCompaction(policy,si,0,2,null,1,Long.MAX_VALUE,false); si=createStripesWithFiles(notExpired,notExpired,notExpired); assertNull(policy.selectCompaction(si,al(),false)); si=createStripesWithFiles(notExpired,expired,notExpired); verifyWholeStripesCompaction(policy,si,1,2,null,1,Long.MAX_VALUE,false); si=createStripesWithFiles(notExpired,expired,notExpired,expired,expired,notExpired); verifyWholeStripesCompaction(policy,si,3,4,null,1,Long.MAX_VALUE,false); si=createStripesWithFiles(expired,expired,notExpired,expired,mixed); verifyWholeStripesCompaction(policy,si,0,1,null,1,Long.MAX_VALUE,false); } finally { EnvironmentEdgeManager.reset(); } }

InternalCallVerifier EqualityVerifier 
/**
 * With the off-peak ratio raised, the off-peak selection should include one
 * more file (all three) than the regular selection (two).
 */
@Test
public void testSplitOffStripeOffPeak() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.unset("hbase.hstore.compaction.min.size");
  conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 2);
  StripeCompactionPolicy.StripeInformationProvider stripes =
      createStripesWithSizes(0, 0, new Long[] { defaultSplitSize - 2, 1L, 1L });
  // Regular (peak) selection picks two files.
  assertEquals(2,
      createPolicy(conf).selectCompaction(stripes, al(), false).getRequest().getFiles().size());
  // Off-peak with a huge ratio picks all three.
  conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 500f);
  assertEquals(3,
      createPolicy(conf).selectCompaction(stripes, al(), true).getRequest().getFiles().size());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies that when any store file is a reference file, the stripe policy forces a compaction
// of ALL store files and executes it with open-ended (OPEN_KEY) boundaries. The mockito
// verify(sc, only()) call pins the exact compactor invocation and its argument matchers.
// NOTE(review): matcher order in verify(...) mirrors StripeCompactor.compact's signature; do not reorder.
@Test public void testWithReferences() throws Exception { StripeCompactionPolicy policy=createPolicy(HBaseConfiguration.create()); StripeCompactor sc=mock(StripeCompactor.class); StoreFile ref=createFile(); when(ref.isReference()).thenReturn(true); StripeInformationProvider si=mock(StripeInformationProvider.class); Collection sfs=al(ref,createFile()); when(si.getStorefiles()).thenReturn(sfs); assertTrue(policy.needsCompactions(si,al())); StripeCompactionPolicy.StripeCompactionRequest scr=policy.selectCompaction(si,al(),false); assertEquals(si.getStorefiles(),scr.getRequest().getFiles()); scr.execute(sc,NoLimitThroughputController.INSTANCE,null); verify(sc,only()).compact(eq(scr.getRequest()),anyInt(),anyLong(),aryEq(OPEN_KEY),aryEq(OPEN_KEY),aryEq(OPEN_KEY),aryEq(OPEN_KEY),any(NoLimitThroughputController.class),any(User.class)); }

Class: org.apache.hadoop.hbase.regionserver.throttle.TestCompactionWithThroughputController

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Drives store-file counts through the compaction-pressure thresholds (min=4, blocking=6) and,
// after each flush plus a sleep past the 1s tune period, asserts the controller's max throughput
// steps 10MB -> 15MB -> 20MB -> unlimited. Finally swaps in NoLimitThroughputController via
// onConfigurationChange and checks the old controller is stopped.
// NOTE(review): the Thread.sleep(2000) waits rely on the 1000ms tune period configured above;
// this is inherently timing-sensitive.
/** * Test the tuning task of {@link PressureAwareCompactionThroughputController} */ @Test public void testThroughputTuning() throws Exception { Configuration conf=TEST_UTIL.getConfiguration(); conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY,DefaultStoreEngine.class.getName()); conf.setLong(PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,20L * 1024 * 1024); conf.setLong(PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,10L * 1024 * 1024); conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY,4); conf.setInt(HStore.BLOCKING_STOREFILES_KEY,6); conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,PressureAwareCompactionThroughputController.class.getName()); conf.setInt(PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD,1000); TEST_UTIL.startMiniCluster(1); Connection conn=ConnectionFactory.createConnection(conf); try { HTableDescriptor htd=new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(family)); htd.setCompactionEnabled(false); TEST_UTIL.getHBaseAdmin().createTable(htd); TEST_UTIL.waitTableAvailable(tableName); HRegionServer regionServer=TEST_UTIL.getRSForFirstRegionInTable(tableName); PressureAwareCompactionThroughputController throughputController=(PressureAwareCompactionThroughputController)regionServer.compactSplitThread.getCompactionThroughputController(); assertEquals(10L * 1024 * 1024,throughputController.getMaxThroughput(),EPSILON); Table table=conn.getTable(tableName); for (int i=0; i < 5; i++) { byte[] value=new byte[0]; table.put(new Put(Bytes.toBytes(i)).addColumn(family,qualifier,value)); TEST_UTIL.flush(tableName); } Thread.sleep(2000); assertEquals(15L * 1024 * 1024,throughputController.getMaxThroughput(),EPSILON); byte[] value1=new byte[0]; table.put(new Put(Bytes.toBytes(5)).addColumn(family,qualifier,value1)); TEST_UTIL.flush(tableName); Thread.sleep(2000); 
assertEquals(20L * 1024 * 1024,throughputController.getMaxThroughput(),EPSILON); byte[] value=new byte[0]; table.put(new Put(Bytes.toBytes(6)).addColumn(family,qualifier,value)); TEST_UTIL.flush(tableName); Thread.sleep(2000); assertEquals(Double.MAX_VALUE,throughputController.getMaxThroughput(),EPSILON); conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,NoLimitThroughputController.class.getName()); regionServer.compactSplitThread.onConfigurationChange(conf); assertTrue(throughputController.isStopped()); assertTrue(regionServer.compactSplitThread.getCompactionThroughputController() instanceof NoLimitThroughputController); } finally { conn.close(); TEST_UTIL.shutdownMiniCluster(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Builds a 2-stripe store (min files 4, blocking 12) and flushes pairs of files (one per stripe)
// to walk the store-file count through 8, 10, 12, 14 files, asserting the computed compaction
// pressure at each step: 0.0 (at minimum), 0.5, 1.0 (at blocking), then 2.0 (over the limit).
// NOTE(review): each loop iteration writes one row per stripe key-range (i and 100+i) so both
// stripes grow in lockstep.
/** * Test the logic that we calculate compaction pressure for a striped store. */ @Test public void testGetCompactionPressureForStripedStore() throws Exception { Configuration conf=TEST_UTIL.getConfiguration(); conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY,StripeStoreEngine.class.getName()); conf.setBoolean(StripeStoreConfig.FLUSH_TO_L0_KEY,false); conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY,2); conf.setInt(StripeStoreConfig.MIN_FILES_KEY,4); conf.setInt(HStore.BLOCKING_STOREFILES_KEY,12); TEST_UTIL.startMiniCluster(1); Connection conn=ConnectionFactory.createConnection(conf); try { HTableDescriptor htd=new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(family)); htd.setCompactionEnabled(false); TEST_UTIL.getHBaseAdmin().createTable(htd); TEST_UTIL.waitTableAvailable(tableName); HStore store=(HStore)getStoreWithName(tableName); assertEquals(0,store.getStorefilesCount()); assertEquals(0.0,store.getCompactionPressure(),EPSILON); Table table=conn.getTable(tableName); for (int i=0; i < 4; i++) { byte[] value1=new byte[0]; table.put(new Put(Bytes.toBytes(i)).addColumn(family,qualifier,value1)); byte[] value=new byte[0]; table.put(new Put(Bytes.toBytes(100 + i)).addColumn(family,qualifier,value)); TEST_UTIL.flush(tableName); } assertEquals(8,store.getStorefilesCount()); assertEquals(0.0,store.getCompactionPressure(),EPSILON); byte[] value5=new byte[0]; table.put(new Put(Bytes.toBytes(4)).addColumn(family,qualifier,value5)); byte[] value4=new byte[0]; table.put(new Put(Bytes.toBytes(104)).addColumn(family,qualifier,value4)); TEST_UTIL.flush(tableName); assertEquals(10,store.getStorefilesCount()); assertEquals(0.5,store.getCompactionPressure(),EPSILON); byte[] value3=new byte[0]; table.put(new Put(Bytes.toBytes(5)).addColumn(family,qualifier,value3)); byte[] value2=new byte[0]; table.put(new Put(Bytes.toBytes(105)).addColumn(family,qualifier,value2)); TEST_UTIL.flush(tableName); assertEquals(12,store.getStorefilesCount()); 
assertEquals(1.0,store.getCompactionPressure(),EPSILON); byte[] value1=new byte[0]; table.put(new Put(Bytes.toBytes(6)).addColumn(family,qualifier,value1)); byte[] value=new byte[0]; table.put(new Put(Bytes.toBytes(106)).addColumn(family,qualifier,value)); TEST_UTIL.flush(tableName); assertEquals(14,store.getStorefilesCount()); assertEquals(2.0,store.getCompactionPressure(),EPSILON); } finally { conn.close(); TEST_UTIL.shutdownMiniCluster(); } }

Class: org.apache.hadoop.hbase.regionserver.throttle.TestFlushWithThroughputController

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises flush-throughput tuning: with zero flush pressure the controller settles at the
// 10MB lower bound; after loading ~25MB of unflushed data, the tuned throughput must equal
// lowerBound * (1 + flushPressure). Finally switches to NoLimitThroughputController via
// onConfigurationChange and checks the pressure-aware controller is stopped.
// NOTE(review): the Thread.sleep(5000) waits depend on the 3000ms tune period set above.
/** * Test the tuning task of {@link PressureAwareFlushThroughputController} */ @Test public void testFlushThroughputTuning() throws Exception { Configuration conf=TEST_UTIL.getConfiguration(); conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY,DefaultStoreEngine.class.getName()); conf.setLong(PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND,20L * 1024 * 1024); conf.setLong(PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND,10L * 1024 * 1024); conf.set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,PressureAwareFlushThroughputController.class.getName()); conf.setInt(PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD,3000); TEST_UTIL.startMiniCluster(1); Connection conn=ConnectionFactory.createConnection(conf); try { HTableDescriptor htd=new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(family)); htd.setCompactionEnabled(false); TEST_UTIL.getHBaseAdmin().createTable(htd); TEST_UTIL.waitTableAvailable(tableName); HRegionServer regionServer=TEST_UTIL.getRSForFirstRegionInTable(tableName); PressureAwareFlushThroughputController throughputController=(PressureAwareFlushThroughputController)regionServer.getFlushThroughputController(); for ( Region region : regionServer.getOnlineRegions()) { region.flush(true); } assertEquals(0.0,regionServer.getFlushPressure(),EPSILON); Thread.sleep(5000); assertEquals(10L * 1024 * 1024,throughputController.getMaxThroughput(),EPSILON); Table table=conn.getTable(tableName); Random rand=new Random(); for (int i=0; i < 10; i++) { for (int j=0; j < 10; j++) { byte[] value=new byte[256 * 1024]; rand.nextBytes(value); table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family,qualifier,value)); } } Thread.sleep(5000); double expectedThroughPut=10L * 1024 * 1024* (1 + regionServer.getFlushPressure()); assertEquals(expectedThroughPut,throughputController.getMaxThroughput(),EPSILON); 
conf.set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,NoLimitThroughputController.class.getName()); regionServer.onConfigurationChange(conf); assertTrue(throughputController.isStopped()); assertTrue(regionServer.getFlushThroughputController() instanceof NoLimitThroughputController); } finally { conn.close(); TEST_UTIL.shutdownMiniCluster(); } }

Class: org.apache.hadoop.hbase.regionserver.wal.TestCompressor

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Round-trips a byte array through {@link Compressor} using independent, freshly
 * initialized dictionaries on the write and read sides, and checks the payload
 * survives intact.
 * @throws IOException on stream failure
 */
@Test
public void testCompressingWithClearDictionaries() throws IOException {
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(sink);
  Dictionary writeDictionary = new LRUDictionary();
  writeDictionary.init(Short.MAX_VALUE);
  byte[] original = Bytes.toBytes("blah");
  Compressor.writeCompressed(original, 0, original.length, out, writeDictionary);
  out.close();
  // Decompress with a brand-new dictionary, the way a cold reader would start.
  byte[] compressed = sink.toByteArray();
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(compressed));
  Dictionary readDictionary = new LRUDictionary();
  readDictionary.init(Short.MAX_VALUE);
  byte[] roundTripped = Compressor.readCompressed(in, readDictionary);
  assertTrue(Bytes.equals(original, roundTripped));
}

Class: org.apache.hadoop.hbase.regionserver.wal.TestCustomWALCellCodec

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test that a custom {@link WALCellCodec} is completely set up when instantiated via
 * {@link WALCellCodec}: it must receive the caller's configuration and a null
 * compression context.
 * @throws Exception on failure
 */
@Test
public void testCreatePreparesCodec() throws Exception {
  Configuration config = new Configuration(false);
  config.setClass(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, CustomWALCellCodec.class,
      WALCellCodec.class);
  // The factory should hand our configuration straight to the codec instance.
  CustomWALCellCodec custom = (CustomWALCellCodec) WALCellCodec.create(config, null, null);
  assertEquals("Custom codec didn't get initialized with the right configuration!", config,
      custom.conf);
  assertEquals("Custom codec didn't get initialized with the right compression context!", null,
      custom.context);
}

Class: org.apache.hadoop.hbase.regionserver.wal.TestDurability

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Checks WAL durability semantics of Increment: a real increment appends one WAL entry; a
// zero-delta increment returns the current value but appends nothing new (WAL count stays at 1,
// even for multi-column zero increments); a subsequent multi-column non-zero increment appends
// again (count 2). verifyWALCount pins the expected number of appended entries after each step.
@Test public void testIncrement() throws Exception { byte[] row1=Bytes.toBytes("row1"); byte[] col1=Bytes.toBytes("col1"); byte[] col2=Bytes.toBytes("col2"); byte[] col3=Bytes.toBytes("col3"); final WALFactory wals=new WALFactory(CONF,null,"TestIncrement"); byte[] tableName=Bytes.toBytes("TestIncrement"); final WAL wal=wals.getWAL(tableName,null); HRegion region=createHRegion(tableName,"increment",wal,Durability.USE_DEFAULT); Increment inc1=new Increment(row1); inc1.addColumn(FAMILY,col1,1); Result res=region.increment(inc1); assertEquals(1,res.size()); assertEquals(1,Bytes.toLong(res.getValue(FAMILY,col1))); verifyWALCount(wals,wal,1); inc1=new Increment(row1); inc1.addColumn(FAMILY,col1,0); res=region.increment(inc1); assertEquals(1,res.size()); assertEquals(1,Bytes.toLong(res.getValue(FAMILY,col1))); verifyWALCount(wals,wal,1); inc1=new Increment(row1); inc1.addColumn(FAMILY,col1,0); inc1.addColumn(FAMILY,col2,0); inc1.addColumn(FAMILY,col3,0); res=region.increment(inc1); assertEquals(3,res.size()); assertEquals(1,Bytes.toLong(res.getValue(FAMILY,col1))); assertEquals(0,Bytes.toLong(res.getValue(FAMILY,col2))); assertEquals(0,Bytes.toLong(res.getValue(FAMILY,col3))); verifyWALCount(wals,wal,1); inc1=new Increment(row1); inc1.addColumn(FAMILY,col1,5); inc1.addColumn(FAMILY,col2,4); inc1.addColumn(FAMILY,col3,3); res=region.increment(inc1); assertEquals(3,res.size()); assertEquals(6,Bytes.toLong(res.getValue(FAMILY,col1))); assertEquals(4,Bytes.toLong(res.getValue(FAMILY,col2))); assertEquals(3,Bytes.toLong(res.getValue(FAMILY,col3))); verifyWALCount(wals,wal,2); }

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * When an {@link Increment} has setReturnResults(false), {@link HRegion#increment}
 * must return null instead of a populated {@link Result}.
 * @throws Exception on failure
 */
@Test
public void testIncrementWithReturnResultsSetToFalse() throws Exception {
  byte[] rowKey = Bytes.toBytes("row1");
  byte[] column = Bytes.toBytes("col1");
  final WALFactory walFactory =
      new WALFactory(CONF, null, "testIncrementWithReturnResultsSetToFalse");
  byte[] table = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse");
  final WAL wal = walFactory.getWAL(table, null);
  HRegion region = createHRegion(table, "increment", wal, Durability.USE_DEFAULT);
  Increment increment = new Increment(rowKey);
  increment.setReturnResults(false);
  increment.addColumn(FAMILY, column, 1);
  Result outcome = region.increment(increment);
  assertNull(outcome);
}

Class: org.apache.hadoop.hbase.regionserver.wal.TestFSHLog

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Regression test for HBASE-11109: flush must not return before all prior appends have reached
// the WAL. An anonymous FSHLog subclass sleeps 100ms at the head of the ring-buffer handler
// while goslow is set, so appends back up; flush is then called in the foreground and the
// region's read point afterward must cover every appended edit.
// NOTE(review): the goslow notifyAll block releases nothing that waits on it here — it appears
// to be belt-and-braces; the MutableBoolean flag itself is what the subclass polls.
/** * Test flush for sure has a sequence id that is beyond the last edit appended. We do this * by slowing appends in the background ring buffer thread while in foreground we call * flush. The addition of the sync over HRegion in flush should fix an issue where flush was * returning before all of its appends had made it out to the WAL (HBASE-11109). * @throws IOException * @see HBASE-11109 */ @Test public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException { String testName="testFlushSequenceIdIsGreaterThanAllEditsInHFile"; final TableName tableName=TableName.valueOf(testName); final HRegionInfo hri=new HRegionInfo(tableName); final byte[] rowName=tableName.getName(); final HTableDescriptor htd=new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("f")); HRegion r=HBaseTestingUtility.createRegionAndWAL(hri,TEST_UTIL.getDefaultRootDirPath(),TEST_UTIL.getConfiguration(),htd); HBaseTestingUtility.closeRegionAndWAL(r); final int countPerFamily=10; final MutableBoolean goslow=new MutableBoolean(false); FSHLog wal=new FSHLog(FileSystem.get(conf),TEST_UTIL.getDefaultRootDirPath(),testName,conf){ @Override void atHeadOfRingBufferEventHandlerAppend(){ if (goslow.isTrue()) { Threads.sleep(100); LOG.debug("Sleeping before appending 100ms"); } super.atHeadOfRingBufferEventHandlerAppend(); } } ; HRegion region=HRegion.openHRegion(TEST_UTIL.getConfiguration(),TEST_UTIL.getTestFileSystem(),TEST_UTIL.getDefaultRootDirPath(),hri,htd,wal); EnvironmentEdge ee=EnvironmentEdgeManager.getDelegate(); try { List puts=null; for ( HColumnDescriptor hcd : htd.getFamilies()) { puts=TestWALReplay.addRegionEdits(rowName,hcd.getName(),countPerFamily,ee,region,"x"); } final Get g=new Get(rowName); Result result=region.get(g); assertEquals(countPerFamily * htd.getFamilies().size(),result.size()); WALEdit edits=new WALEdit(); for ( Put p : puts) { CellScanner cs=p.cellScanner(); while (cs.advance()) { edits.add(cs.current()); } } List clusterIds=new ArrayList(); 
clusterIds.add(UUID.randomUUID()); goslow.setValue(true); for (int i=0; i < countPerFamily; i++) { final HRegionInfo info=region.getRegionInfo(); final WALKey logkey=new WALKey(info.getEncodedNameAsBytes(),tableName,System.currentTimeMillis(),clusterIds,-1,-1,region.getMVCC()); wal.append(htd,info,logkey,edits,true); } region.flush(true); long currentSequenceId=region.getReadPoint(null); goslow.setValue(false); synchronized (goslow) { goslow.notifyAll(); } assertTrue(currentSequenceId >= region.getReadPoint(null)); } finally { region.close(true); wal.close(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// With maxlogs=1, repeatedly appends edits to two regions and rolls the WAL, asserting after
// each roll/flush which region(s) findRegionsToForceFlush() nominates and how many rolled log
// files remain. The final section checks a flush that is started (startCacheFlush) but not yet
// completed still pins its log file until completeCacheFlush runs.
// NOTE(review): the null check on wal in the finally block is defensive only — wal is assigned
// before the try.
/** * On rolling a wal after reaching the threshold, {@link WAL#rollWriter()} returns the * list of regions which should be flushed in order to archive the oldest wal file. *

* This method tests this behavior by inserting edits and rolling the wal enough times to reach * the max number of logs threshold. It checks whether we get the "right regions" for flush on * rolling the wal. * @throws Exception */ @Test public void testFindMemStoresEligibleForFlush() throws Exception { LOG.debug("testFindMemStoresEligibleForFlush"); Configuration conf1=HBaseConfiguration.create(conf); conf1.setInt("hbase.regionserver.maxlogs",1); FSHLog wal=new FSHLog(fs,FSUtils.getRootDir(conf1),dir.toString(),HConstants.HREGION_OLDLOGDIR_NAME,conf1,null,true,null,null); HTableDescriptor t1=new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row")); HTableDescriptor t2=new HTableDescriptor(TableName.valueOf("t2")).addFamily(new HColumnDescriptor("row")); HRegionInfo hri1=new HRegionInfo(t1.getTableName(),HConstants.EMPTY_START_ROW,HConstants.EMPTY_END_ROW); HRegionInfo hri2=new HRegionInfo(t2.getTableName(),HConstants.EMPTY_START_ROW,HConstants.EMPTY_END_ROW); MultiVersionConcurrencyControl mvcc=new MultiVersionConcurrencyControl(); try { addEdits(wal,hri1,t1,2,mvcc); wal.rollWriter(); addEdits(wal,hri1,t1,2,mvcc); wal.rollWriter(); assertTrue(wal.getNumRolledLogFiles() == 2); byte[][] regionsToFlush=wal.findRegionsToForceFlush(); assertEquals(1,regionsToFlush.length); assertEquals(hri1.getEncodedNameAsBytes(),regionsToFlush[0]); addEdits(wal,hri2,t2,2,mvcc); regionsToFlush=wal.findRegionsToForceFlush(); assertEquals(regionsToFlush.length,1); assertEquals(hri1.getEncodedNameAsBytes(),regionsToFlush[0]); flushRegion(wal,hri1.getEncodedNameAsBytes(),t1.getFamiliesKeys()); wal.rollWriter(); assertEquals(1,wal.getNumRolledLogFiles()); flushRegion(wal,hri2.getEncodedNameAsBytes(),t2.getFamiliesKeys()); wal.rollWriter(true); assertEquals(0,wal.getNumRolledLogFiles()); addEdits(wal,hri1,t1,2,mvcc); addEdits(wal,hri2,t2,2,mvcc); wal.rollWriter(); assertEquals(1,wal.getNumRolledLogFiles()); addEdits(wal,hri1,t1,2,mvcc); wal.rollWriter(); 
regionsToFlush=wal.findRegionsToForceFlush(); assertEquals(2,regionsToFlush.length); flushRegion(wal,hri1.getEncodedNameAsBytes(),t1.getFamiliesKeys()); flushRegion(wal,hri2.getEncodedNameAsBytes(),t2.getFamiliesKeys()); wal.rollWriter(true); assertEquals(0,wal.getNumRolledLogFiles()); addEdits(wal,hri1,t1,2,mvcc); wal.startCacheFlush(hri1.getEncodedNameAsBytes(),t1.getFamiliesKeys()); wal.rollWriter(); wal.completeCacheFlush(hri1.getEncodedNameAsBytes()); assertEquals(1,wal.getNumRolledLogFiles()); } finally { if (wal != null) { wal.close(); } } }


APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier 
// Exercises FSHLog.LOG_NAME_COMPARATOR: within one WAL (regular or meta) names order by the
// embedded timestamp; comparing a meta-WAL path against a non-meta path must throw
// IllegalArgumentException from either comparator. The boolean ex/exMeta flags capture that
// the expected exception was actually raised.
/** * tests the log comparator. Ensure that we are not mixing meta logs with non-meta logs (throws * exception if we do). Comparison is based on the timestamp present in the wal name. * @throws Exception */ @Test public void testWALComparator() throws Exception { FSHLog wal1=null; FSHLog walMeta=null; try { wal1=new FSHLog(fs,FSUtils.getRootDir(conf),dir.toString(),HConstants.HREGION_OLDLOGDIR_NAME,conf,null,true,null,null); LOG.debug("Log obtained is: " + wal1); Comparator comp=wal1.LOG_NAME_COMPARATOR; Path p1=wal1.computeFilename(11); Path p2=wal1.computeFilename(12); assertTrue(comp.compare(p1,p1) == 0); assertTrue(comp.compare(p1,p2) < 0); walMeta=new FSHLog(fs,FSUtils.getRootDir(conf),dir.toString(),HConstants.HREGION_OLDLOGDIR_NAME,conf,null,true,null,DefaultWALProvider.META_WAL_PROVIDER_ID); Comparator compMeta=walMeta.LOG_NAME_COMPARATOR; Path p1WithMeta=walMeta.computeFilename(11); Path p2WithMeta=walMeta.computeFilename(12); assertTrue(compMeta.compare(p1WithMeta,p1WithMeta) == 0); assertTrue(compMeta.compare(p1WithMeta,p2WithMeta) < 0); boolean ex=false; try { comp.compare(p1WithMeta,p2); } catch ( IllegalArgumentException e) { ex=true; } assertTrue("Comparator doesn't complain while checking meta log files",ex); boolean exMeta=false; try { compMeta.compare(p1WithMeta,p2); } catch ( IllegalArgumentException e) { exMeta=true; } assertTrue("Meta comparator doesn't complain while checking log files",exMeta); } finally { if (wal1 != null) { wal1.close(); } if (walMeta != null) { walMeta.close(); } } }

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * A loaded WAL coprocessor won't break existing WAL test cases: the sample
 * observer must be discoverable by name from the log's coprocessor host.
 */
@Test
public void testWALCoprocessorLoaded() throws Exception {
  FSHLog wal = null;
  try {
    wal = new FSHLog(fs, FSUtils.getRootDir(conf), dir.toString(),
        HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null);
    WALCoprocessorHost coprocessorHost = wal.getCoprocessorHost();
    Coprocessor observer =
        coprocessorHost.findCoprocessor(SampleRegionWALObserver.class.getName());
    assertNotNull(observer);
  } finally {
    if (wal != null) {
      wal.close();
    }
  }
}

Class: org.apache.hadoop.hbase.regionserver.wal.TestKeyValueCompression

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips tagged KeyValues through {@link KeyValueCompression} and verifies that
 * the first KV read back still carries its single tag after the compression
 * context's dictionaries are cleared between write and read.
 * @throws Exception on failure
 */
@Test
public void testKVWithTags() throws Exception {
  CompressionContext compression = new CompressionContext(LRUDictionary.class, false, false);
  DataOutputBuffer out = new DataOutputBuffer(BUF_SIZE);
  // Write KVs carrying 1, 0 and 2 tags respectively.
  KeyValueCompression.writeKV(out, createKV(1), compression);
  KeyValueCompression.writeKV(out, createKV(0), compression);
  KeyValueCompression.writeKV(out, createKV(2), compression);
  // Reset dictionaries before reading, as a fresh reader would.
  compression.clear();
  DataInputStream in =
      new DataInputStream(new ByteArrayInputStream(out.getData(), 0, out.getLength()));
  KeyValue firstReadBack = KeyValueCompression.readKV(in, compression);
  List firstTags = firstReadBack.getTags();
  assertEquals(1, firstTags.size());
}

Class: org.apache.hadoop.hbase.regionserver.wal.TestLogRollAbort

InternalCallVerifier BooleanVerifier 
// HBASE-4282 scenario: write and sync edits, restart all datanodes so the open WAL file's
// pipeline breaks, then force a roll. A FailedLogCloseException is the expected/tolerated path;
// any other throwable is logged as a test failure.
// NOTE(review): the catch of FailedLogCloseException is intentionally empty — the abort behavior
// under test happens server-side; the generic Throwable catch only logs, it does not fail the
// test explicitly.
/** * Tests that RegionServer aborts if we hit an error closing the WAL when * there are unsynced WAL edits. See HBASE-4282. */ @Test public void testRSAbortWithUnflushedEdits() throws Exception { LOG.info("Starting testRSAbortWithUnflushedEdits()"); TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close(); TableName tableName=TableName.valueOf(this.getClass().getSimpleName()); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc); Table table=TEST_UTIL.getConnection().getTable(desc.getTableName()); try { HRegionServer server=TEST_UTIL.getRSForFirstRegionInTable(tableName); WAL log=server.getWAL(null); assertTrue("Need append support for this test",FSUtils.isAppendSupported(TEST_UTIL.getConfiguration())); Put p=new Put(Bytes.toBytes("row2001")); p.addColumn(HConstants.CATALOG_FAMILY,Bytes.toBytes("col"),Bytes.toBytes(2001)); table.put(p); log.sync(); p=new Put(Bytes.toBytes("row2002")); p.addColumn(HConstants.CATALOG_FAMILY,Bytes.toBytes("col"),Bytes.toBytes(2002)); table.put(p); dfsCluster.restartDataNodes(); LOG.info("Restarted datanodes"); try { log.rollWriter(true); } catch ( FailedLogCloseException flce) { } catch ( Throwable t) { LOG.fatal("FAILED TEST: Got wrong exception",t); } } finally { table.close(); } }

Class: org.apache.hadoop.hbase.regionserver.wal.TestLogRolling

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Starts 3 fresh datanodes, stops the original ones so the WAL pipeline must use the new set,
// then kills pipeline nodes one at a time: the first kill must trigger a low-replication log
// roll (checked via the registered WALActionsListener flag and a new file number); the second
// kill must disable the low-replication roller; restoring a datanode plus an explicit roll must
// re-enable it and restore default replication.
// NOTE(review): heavy timing/cluster coupling; assertions on file numbers assume wall-clock
// timestamps embedded in WAL names, per the surrounding assertions.
/** * Tests that logs are rolled upon detecting datanode death * Requires an HDFS jar with HDFS-826 & syncFs() support (HDFS-200) */ @Test public void testLogRollOnDatanodeDeath() throws Exception { TEST_UTIL.ensureSomeRegionServersAvailable(2); assertTrue("This test requires WAL file replication set to 2.",fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) == 2); LOG.info("Replication=" + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); this.server=cluster.getRegionServer(0); HTableDescriptor desc=new HTableDescriptor(TableName.valueOf(getName())); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc); Table table=TEST_UTIL.getConnection().getTable(desc.getTableName()); assertTrue(((HTable)table).isAutoFlush()); server=TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName()); HRegionInfo region=server.getOnlineRegions(desc.getTableName()).get(0).getRegionInfo(); final FSHLog log=(FSHLog)server.getWAL(region); final AtomicBoolean lowReplicationHookCalled=new AtomicBoolean(false); log.registerWALActionsListener(new WALActionsListener.Base(){ @Override public void logRollRequested( boolean lowReplication){ if (lowReplication) { lowReplicationHookCalled.lazySet(true); } } } ); assertTrue("Need append support for this test",FSUtils.isAppendSupported(TEST_UTIL.getConfiguration())); List existingNodes=dfsCluster.getDataNodes(); int numDataNodes=3; dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(),numDataNodes,true,null,null); List allNodes=dfsCluster.getDataNodes(); for (int i=allNodes.size() - 1; i >= 0; i--) { if (existingNodes.contains(allNodes.get(i))) { dfsCluster.stopDataNode(i); } } assertTrue("DataNodes " + dfsCluster.getDataNodes().size() + " default replication "+ fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()),dfsCluster.getDataNodes().size() >= fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) + 1); writeData(table,2); long curTime=System.currentTimeMillis(); 
LOG.info("log.getCurrentFileName(): " + log.getCurrentFileName()); long oldFilenum=DefaultWALProvider.extractFileNumFromWAL(log); assertTrue("Log should have a timestamp older than now",curTime > oldFilenum && oldFilenum != -1); assertTrue("The log shouldn't have rolled yet",oldFilenum == DefaultWALProvider.extractFileNumFromWAL(log)); final DatanodeInfo[] pipeline=log.getPipeLine(); assertTrue(pipeline.length == fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); assertTrue(dfsCluster.stopDataNode(pipeline[0].getName()) != null); writeData(table,2); long newFilenum=DefaultWALProvider.extractFileNumFromWAL(log); assertTrue("Missing datanode should've triggered a log roll",newFilenum > oldFilenum && newFilenum > curTime); assertTrue("The log rolling hook should have been called with the low replication flag",lowReplicationHookCalled.get()); writeData(table,3); assertTrue("The log should not roll again.",DefaultWALProvider.extractFileNumFromWAL(log) == newFilenum); assertTrue(dfsCluster.stopDataNode(pipeline[1].getName()) != null); batchWriteAndWait(table,log,3,false,14000); int replication=log.getLogReplication(); assertTrue("LowReplication Roller should've been disabled, current replication=" + replication,!log.isLowReplicationRollEnabled()); dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(),1,true,null,null); log.rollWriter(true); batchWriteAndWait(table,log,13,true,10000); replication=log.getLogReplication(); assertTrue("New log file should have the default replication instead of " + replication,replication == fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); assertTrue("LowReplication Roller should've been enabled",log.isLowReplicationRollEnabled()); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// HBASE-8597 regression: a region whose only WAL content is a compaction record must not pin
// old WAL files. After flushing stores and compacting down to one store file, each roll should
// leave exactly one rolled WAL file regardless of the compaction marker.
// NOTE(review): the bounded waitTime loop polls for the compaction to reduce the store-file
// count; the subsequent assert fails if compaction didn't finish within ~3s.
/** * Tests that logs are deleted when some region has a compaction * record in WAL and no other records. See HBASE-8597. */ @Test public void testCompactionRecordDoesntBlockRolling() throws Exception { Table table=null; Table t=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); try { table=createTestTable(getName()); server=TEST_UTIL.getRSForFirstRegionInTable(table.getName()); Region region=server.getOnlineRegions(table.getName()).get(0); final WAL log=server.getWAL(region.getRegionInfo()); Store s=region.getStore(HConstants.CATALOG_FAMILY); admin.flush(TableName.NAMESPACE_TABLE_NAME); for (int i=1; i <= 2; ++i) { doPut(table,i); admin.flush(table.getName()); } doPut(table,3); assertEquals("Should have no WAL after initial writes",0,DefaultWALProvider.getNumRolledLogFiles(log)); assertEquals(2,s.getStorefilesCount()); log.rollWriter(); assertEquals("Should have WAL; one table is not flushed",1,DefaultWALProvider.getNumRolledLogFiles(log)); admin.flush(table.getName()); region.compact(false); Assert.assertNotNull(s); for (int waitTime=3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime-=200) { Threads.sleepWithoutInterrupt(200); } assertEquals("Compaction didn't happen",1,s.getStorefilesCount()); doPut(table,0); log.rollWriter(); assertEquals("Should have WAL; one table is not flushed",1,DefaultWALProvider.getNumRolledLogFiles(log)); admin.flush(table.getName()); doPut(table,1); log.rollWriter(); assertEquals("Should have 1 WALs at the end",1,DefaultWALProvider.getNumRolledLogFiles(log)); } finally { if (t != null) t.close(); if (table != null) table.close(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// After writing data, flushing every online region, and rolling the writer, at most two rolled
// WAL files may remain — flushed regions must release their log files for archiving.
/** * Tests that logs are deleted * @throws IOException * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException */ @Test public void testLogRolling() throws Exception { this.tableName=getName(); startAndWriteData(); HRegionInfo region=server.getOnlineRegions(TableName.valueOf(tableName)).get(0).getRegionInfo(); final WAL log=server.getWAL(region); LOG.info("after writing there are " + DefaultWALProvider.getNumRolledLogFiles(log) + " log files"); for ( Region r : server.getOnlineRegionsLocalContext()) { r.flush(true); } log.rollWriter(); int count=DefaultWALProvider.getNumRolledLogFiles(log); LOG.info("after flushing all regions and rolling logs there are " + count + " log files"); assertTrue(("actual count: " + count),count <= 2); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Restarts every datanode in the WAL pipeline (twice) and checks the WAL rolls each time:
// a listener collects pre/post-roll paths, every collected WAL file is then lease-recovered
// and re-read to confirm rows 1002-1005 all made it to some WAL, a scan confirms the table
// data itself, and no region server may have aborted.
// NOTE(review): EOFException while re-reading a WAL is tolerated (partial trailing file after
// pipeline restart); the Thread.sleep(1000) after each restart is a settle delay before
// waitActive().
/** * Test that WAL is rolled when all data nodes in the pipeline have been * restarted. * @throws Exception */ @Test public void testLogRollOnPipelineRestart() throws Exception { LOG.info("Starting testLogRollOnPipelineRestart"); assertTrue("This test requires WAL file replication.",fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) > 1); LOG.info("Replication=" + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); Table t=TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); try { this.server=cluster.getRegionServer(0); HTableDescriptor desc=new HTableDescriptor(TableName.valueOf(getName())); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc); Table table=TEST_UTIL.getConnection().getTable(desc.getTableName()); server=TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName()); HRegionInfo region=server.getOnlineRegions(desc.getTableName()).get(0).getRegionInfo(); final WAL log=server.getWAL(region); final List paths=new ArrayList(); final List preLogRolledCalled=new ArrayList(); paths.add(DefaultWALProvider.getCurrentFileName(log)); log.registerWALActionsListener(new WALActionsListener.Base(){ @Override public void preLogRoll( Path oldFile, Path newFile){ LOG.debug("preLogRoll: oldFile=" + oldFile + " newFile="+ newFile); preLogRolledCalled.add(new Integer(1)); } @Override public void postLogRoll( Path oldFile, Path newFile){ paths.add(newFile); } } ); assertTrue("Need append support for this test",FSUtils.isAppendSupported(TEST_UTIL.getConfiguration())); writeData(table,1002); long curTime=System.currentTimeMillis(); LOG.info("log.getCurrentFileName()): " + DefaultWALProvider.getCurrentFileName(log)); long oldFilenum=DefaultWALProvider.extractFileNumFromWAL(log); assertTrue("Log should have a timestamp older than now",curTime > oldFilenum && oldFilenum != -1); assertTrue("The log shouldn't have rolled yet",oldFilenum == DefaultWALProvider.extractFileNumFromWAL(log)); 
dfsCluster.restartDataNodes(); Thread.sleep(1000); dfsCluster.waitActive(); LOG.info("Data Nodes restarted"); validateData(table,1002); writeData(table,1003); long newFilenum=DefaultWALProvider.extractFileNumFromWAL(log); assertTrue("Missing datanode should've triggered a log roll",newFilenum > oldFilenum && newFilenum > curTime); validateData(table,1003); writeData(table,1004); dfsCluster.restartDataNodes(); Thread.sleep(1000); dfsCluster.waitActive(); LOG.info("Data Nodes restarted"); validateData(table,1004); writeData(table,1005); log.rollWriter(true); assertTrue("preLogRolledCalled has size of " + preLogRolledCalled.size(),preLogRolledCalled.size() >= 1); Set loggedRows=new HashSet(); FSUtils fsUtils=FSUtils.getInstance(fs,TEST_UTIL.getConfiguration()); for ( Path p : paths) { LOG.debug("recovering lease for " + p); fsUtils.recoverFileLease(((HFileSystem)fs).getBackingFs(),p,TEST_UTIL.getConfiguration(),null); LOG.debug("Reading WAL " + FSUtils.getPath(p)); WAL.Reader reader=null; try { reader=WALFactory.createReader(fs,p,TEST_UTIL.getConfiguration()); WAL.Entry entry; while ((entry=reader.next()) != null) { LOG.debug("#" + entry.getKey().getLogSeqNum() + ": "+ entry.getEdit().getCells()); for ( Cell cell : entry.getEdit().getCells()) { loggedRows.add(Bytes.toStringBinary(cell.getRowArray(),cell.getRowOffset(),cell.getRowLength())); } } } catch ( EOFException e) { LOG.debug("EOF reading file " + FSUtils.getPath(p)); } finally { if (reader != null) reader.close(); } } assertTrue(loggedRows.contains("row1002")); assertTrue(loggedRows.contains("row1003")); assertTrue(loggedRows.contains("row1004")); assertTrue(loggedRows.contains("row1005")); for ( Region r : server.getOnlineRegionsLocalContext()) { try { r.flush(true); } catch ( Exception e) { LOG.info(e); } } ResultScanner scanner=table.getScanner(new Scan()); try { for (int i=2; i <= 5; i++) { Result r=scanner.next(); assertNotNull(r); assertFalse(r.isEmpty()); assertEquals("row100" + 
i,Bytes.toString(r.getRow())); } } finally { scanner.close(); } for ( JVMClusterUtil.RegionServerThread rsThread : TEST_UTIL.getHBaseCluster().getRegionServerThreads()) { assertFalse(rsThread.getRegionServer().isAborted()); } } finally { if (t != null) t.close(); } }

Class: org.apache.hadoop.hbase.regionserver.wal.TestReadOldRootAndMetaEdits

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Writes three entries (regular table t, legacy -ROOT-, legacy .META.) and reads them back:
// the reader must surface the regular-table entry, silently skip the old ROOT entry, map the
// old META entry to the current META table name, and then hit end-of-log (null).
// NOTE(review): ++logCount in each HLogKey gives the entries strictly increasing sequence
// numbers; the write order t/root/oldMeta is what the read-side assertions depend on.
/** * Inserts three waledits in the wal file, and reads them back. The first edit is of a regular * table, second waledit is for the ROOT table (it will be ignored while reading), * and last waledit is for the hbase:meta table, which will be linked to the new system:meta table. * @throws IOException */ @Test public void testReadOldRootAndMetaEdits() throws IOException { LOG.debug("testReadOldRootAndMetaEdits"); byte[] row=Bytes.toBytes("row"); KeyValue kv=new KeyValue(row,row,row,row); List kvs=new ArrayList(); kvs.add(kv); WALProvider.Writer writer=null; WAL.Reader reader=null; TableName t=TableName.valueOf("t"); HRegionInfo tRegionInfo=null; int logCount=0; long timestamp=System.currentTimeMillis(); Path path=new Path(dir,"t"); try { tRegionInfo=new HRegionInfo(t,HConstants.EMPTY_START_ROW,HConstants.EMPTY_END_ROW); WAL.Entry tEntry=createAEntry(new HLogKey(tRegionInfo.getEncodedNameAsBytes(),t,++logCount,timestamp,HConstants.DEFAULT_CLUSTER_ID),kvs); WAL.Entry rootEntry=createAEntry(new HLogKey(Bytes.toBytes(TableName.OLD_ROOT_STR),TableName.OLD_ROOT_TABLE_NAME,++logCount,timestamp,HConstants.DEFAULT_CLUSTER_ID),kvs); WAL.Entry oldMetaEntry=createAEntry(new HLogKey(Bytes.toBytes(TableName.OLD_META_STR),TableName.OLD_META_TABLE_NAME,++logCount,timestamp,HConstants.DEFAULT_CLUSTER_ID),kvs); writer=WALFactory.createWALWriter(fs,path,conf); writer.append(tEntry); writer.append(rootEntry); writer.append(oldMetaEntry); writer.sync(); writer.close(); reader=WALFactory.createReader(fs,path,conf); WAL.Entry entry=reader.next(); assertNotNull(entry); assertTrue(entry.getKey().getTablename().equals(t)); assertEquals(Bytes.toString(entry.getKey().getEncodedRegionName()),Bytes.toString(tRegionInfo.getEncodedNameAsBytes())); entry=reader.next(); assertEquals(entry.getKey().getTablename(),TableName.META_TABLE_NAME); assertNull(reader.next()); } finally { if (writer != null) { writer.close(); } if (reader != null) { reader.close(); } } }

Class: org.apache.hadoop.hbase.regionserver.wal.TestSequenceIdAccounting

InternalCallVerifier EqualityVerifier 
/**
 * Verifies startCacheFlush/completeCacheFlush bookkeeping: flushing everything
 * outstanding reports NO_SEQNUM, while flushing a family with no edits reports
 * the lowest sequence id still outstanding for the other families.
 */
@Test
public void testStartCacheFlush() {
  SequenceIdAccounting sida = new SequenceIdAccounting();
  sida.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME);
  // (Removed an unused local Map that was populated but never read.)
  // Fresh region: nothing outstanding, so the flush sees NO_SEQNUM.
  assertEquals(HConstants.NO_SEQNUM, (long) sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES));
  sida.completeCacheFlush(ENCODED_REGION_NAME);
  long sequenceid = 1;
  sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid, true);
  // All families take part in this flush, so nothing is left outstanding either.
  assertEquals(HConstants.NO_SEQNUM, (long) sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES));
  sida.completeCacheFlush(ENCODED_REGION_NAME);
  long currentSequenceId = sequenceid;
  sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid, true);
  final Set otherFamily = new HashSet(1);
  otherFamily.add(Bytes.toBytes("otherCf"));
  sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true);
  // Flushing a family with no edits reports the lowest id of the families not flushed.
  assertEquals(currentSequenceId, (long) sida.startCacheFlush(ENCODED_REGION_NAME, otherFamily));
  sida.completeCacheFlush(ENCODED_REGION_NAME);
}

InternalCallVerifier BooleanVerifier 
/**
 * Exercises {@code findLower}: null for a NO_SEQNUM candidate, a single hit when the
 * candidate equals the region's tracked lowest id, and null again below that lowest.
 */
@Test
public void testFindLower() {
  SequenceIdAccounting tracker = new SequenceIdAccounting();
  tracker.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME);
  Map candidateIds = new HashMap();
  candidateIds.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM);
  long seqId = 1;
  tracker.update(ENCODED_REGION_NAME, FAMILIES, seqId, true);
  tracker.update(ENCODED_REGION_NAME, FAMILIES, seqId++, true);
  tracker.update(ENCODED_REGION_NAME, FAMILIES, seqId++, true);
  // A NO_SEQNUM candidate never has anything lower.
  assertTrue(tracker.findLower(candidateIds) == null);
  // A candidate equal to the tracked lowest yields exactly one match.
  candidateIds.put(ENCODED_REGION_NAME, tracker.getLowestSequenceId(ENCODED_REGION_NAME));
  assertTrue(tracker.findLower(candidateIds).length == 1);
  // A candidate below the tracked lowest finds nothing.
  candidateIds.put(ENCODED_REGION_NAME, tracker.getLowestSequenceId(ENCODED_REGION_NAME) - 1);
  assertTrue(tracker.findLower(candidateIds) == null);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Walks {@code areAllLower} through its state space: candidate ids qualify only while
 * strictly below everything outstanding for the region, with startCacheFlush /
 * completeCacheFlush transitions changing what counts as outstanding.
 */
@Test
public void testAreAllLower() {
  SequenceIdAccounting tracker = new SequenceIdAccounting();
  tracker.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME);
  Map candidateIds = new HashMap();
  candidateIds.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM);
  // Nothing outstanding yet, so NO_SEQNUM trivially qualifies.
  assertTrue(tracker.areAllLower(candidateIds));
  long seqId = 1;
  tracker.update(ENCODED_REGION_NAME, FAMILIES, seqId, true);
  tracker.update(ENCODED_REGION_NAME, FAMILIES, seqId++, true);
  tracker.update(ENCODED_REGION_NAME, FAMILIES, seqId++, true);
  assertTrue(tracker.areAllLower(candidateIds));
  candidateIds.put(ENCODED_REGION_NAME, seqId);
  assertFalse(tracker.areAllLower(candidateIds));
  long lowestSeqId = tracker.getLowestSequenceId(ENCODED_REGION_NAME);
  assertEquals("Lowest should be first sequence id inserted", 1, lowestSeqId);
  candidateIds.put(ENCODED_REGION_NAME, lowestSeqId);
  assertFalse(tracker.areAllLower(candidateIds));
  // While a flush is merely in progress, the edits still count as outstanding.
  tracker.startCacheFlush(ENCODED_REGION_NAME, FAMILIES);
  assertFalse(tracker.areAllLower(candidateIds));
  candidateIds.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM);
  assertTrue(tracker.areAllLower(candidateIds));
  tracker.completeCacheFlush(ENCODED_REGION_NAME);
  candidateIds.put(ENCODED_REGION_NAME, seqId);
  assertTrue(tracker.areAllLower(candidateIds));
  tracker.update(ENCODED_REGION_NAME, FAMILIES, seqId++, true);
  tracker.update(ENCODED_REGION_NAME, FAMILIES, seqId++, true);
  tracker.update(ENCODED_REGION_NAME, FAMILIES, seqId++, true);
  lowestSeqId = tracker.getLowestSequenceId(ENCODED_REGION_NAME);
  candidateIds.put(ENCODED_REGION_NAME, lowestSeqId);
  assertFalse(tracker.areAllLower(candidateIds));
  // A completed flush empties the accounting for the region entirely.
  tracker.startCacheFlush(ENCODED_REGION_NAME, FAMILIES);
  assertEquals(HConstants.NO_SEQNUM, tracker.getLowestSequenceId(ENCODED_REGION_NAME));
  tracker.completeCacheFlush(ENCODED_REGION_NAME);
  assertEquals(HConstants.NO_SEQNUM, tracker.getLowestSequenceId(ENCODED_REGION_NAME));
  candidateIds.put(ENCODED_REGION_NAME, seqId);
  tracker.update(ENCODED_REGION_NAME, FAMILIES, ++seqId, true);
  tracker.update(ENCODED_REGION_NAME, FAMILIES, ++seqId, true);
  tracker.update(ENCODED_REGION_NAME, FAMILIES, ++seqId, true);
  assertTrue(tracker.areAllLower(candidateIds));
}

Class: org.apache.hadoop.hbase.regionserver.wal.TestWALReplay

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): Writes 10 edits per family, flushes, writes 5 more, then completes a
// cache flush through MockWAL and splits the single WAL file. The recovered.edits
// file name (parsed as an int, SequenceId files filtered out) is asserted to equal
// the region's last read point. Assumes listStatus1 holds at least one matching
// file; otherwise editCount stays 0 and the final assert fails confusingly — TODO
// confirm that assumption is safe for this mode/wals configuration.
@Test public void testSequentialEditLogSeqNum() throws IOException { final TableName tableName=TableName.valueOf(currentTest.getMethodName()); final HRegionInfo hri=createBasic3FamilyHRegionInfo(tableName); final Path basedir=FSUtils.getTableDir(this.hbaseRootDir,tableName); deleteDir(basedir); final byte[] rowName=tableName.getName(); final int countPerFamily=10; final HTableDescriptor htd=createBasic1FamilyHTD(tableName); MockWAL wal=createMockWAL(); HRegion region=HRegion.openHRegion(this.conf,this.fs,hbaseRootDir,hri,htd,wal); for ( HColumnDescriptor hcd : htd.getFamilies()) { addRegionEdits(rowName,hcd.getName(),countPerFamily,this.ee,region,"x"); } region.flush(true); for ( HColumnDescriptor hcd : htd.getFamilies()) { addRegionEdits(rowName,hcd.getName(),5,this.ee,region,"x"); } long lastestSeqNumber=region.getReadPoint(null); wal.doCompleteCacheFlush=true; wal.completeCacheFlush(hri.getEncodedNameAsBytes()); wal.shutdown(); FileStatus[] listStatus=wal.getFiles(); assertNotNull(listStatus); assertTrue(listStatus.length > 0); WALSplitter.splitLogFile(hbaseRootDir,listStatus[0],this.fs,this.conf,null,null,null,mode,wals); FileStatus[] listStatus1=this.fs.listStatus(new Path(FSUtils.getTableDir(hbaseRootDir,tableName),new Path(hri.getEncodedName(),"recovered.edits")),new PathFilter(){ @Override public boolean accept( Path p){ if (WALSplitter.isSequenceIdFile(p)) { return false; } return true; } } ); int editCount=0; for ( FileStatus fileStatus : listStatus1) { editCount=Integer.parseInt(fileStatus.getPath().getName()); } assertEquals("The sequence number of the recoverd.edits and the current edit seq should be same",lastestSeqNumber,editCount); }

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * @throws Exception */ @Test public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception { final TableName tableName=TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"); byte[] family1=Bytes.toBytes("cf1"); byte[] family2=Bytes.toBytes("cf2"); byte[] qualifier=Bytes.toBytes("q"); byte[] value=Bytes.toBytes("testV"); byte[][] familys={family1,family2}; TEST_UTIL.createTable(tableName,familys); Table htable=TEST_UTIL.getConnection().getTable(tableName); Put put=new Put(Bytes.toBytes("r1")); put.addColumn(family1,qualifier,value); htable.put(put); ResultScanner resultScanner=htable.getScanner(new Scan()); int count=0; while (resultScanner.next() != null) { count++; } resultScanner.close(); assertEquals(1,count); MiniHBaseCluster hbaseCluster=TEST_UTIL.getMiniHBaseCluster(); List regions=hbaseCluster.getRegions(tableName); assertEquals(1,regions.size()); Region destRegion=regions.get(0); int originServerNum=hbaseCluster.getServerWith(destRegion.getRegionInfo().getRegionName()); assertTrue("Please start more than 1 regionserver",hbaseCluster.getRegionServerThreads().size() > 1); int destServerNum=0; while (destServerNum == originServerNum) { destServerNum++; } HRegionServer originServer=hbaseCluster.getRegionServer(originServerNum); HRegionServer destServer=hbaseCluster.getRegionServer(destServerNum); moveRegionAndWait(destRegion,destServer); Delete del=new Delete(Bytes.toBytes("r1")); htable.delete(del); resultScanner=htable.getScanner(new Scan()); count=0; while (resultScanner.next() != null) { count++; } resultScanner.close(); assertEquals(0,count); Region region=destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName()); region.flush(true); for ( Store store : region.getStores()) { store.triggerMajorCompaction(); } region.compact(true); moveRegionAndWait(destRegion,originServer); originServer.abort("testing"); Result result=htable.get(new Get(Bytes.toBytes("r1"))); if (result != null) { assertTrue("Row is deleted, but we 
get" + result.toString(),(result == null) || result.isEmpty()); } resultScanner.close(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): Appends 1000 edits per family directly to the WAL, then marks those
// families flushed (startCacheFlush/completeCacheFlush) before appending two more
// edits ("another family" put and a "c" DeleteFamily). After splitting the WAL as a
// different user, the region is reopened over an anonymous HRegion subclass whose
// internalFlushcache override counts flushes; the 100KB memstore flush size set on
// newConf is what makes replay trigger at least one flush (flushcount > 0). The
// final get expects countPerFamily * (families - 1) cells — presumably the edits of
// the flushed families are replayed while the DeleteFamily removes "c" — confirm.
/** * Create an HRegion with the result of a WAL split and test we only see the * good edits * @throws Exception */ @Test public void testReplayEditsWrittenIntoWAL() throws Exception { final TableName tableName=TableName.valueOf("testReplayEditsWrittenIntoWAL"); final MultiVersionConcurrencyControl mvcc=new MultiVersionConcurrencyControl(); final HRegionInfo hri=createBasic3FamilyHRegionInfo(tableName); final Path basedir=FSUtils.getTableDir(hbaseRootDir,tableName); deleteDir(basedir); final HTableDescriptor htd=createBasic3FamilyHTD(tableName); HRegion region2=HBaseTestingUtility.createRegionAndWAL(hri,hbaseRootDir,this.conf,htd); HBaseTestingUtility.closeRegionAndWAL(region2); final WAL wal=createWAL(this.conf); final byte[] rowName=tableName.getName(); final byte[] regionName=hri.getEncodedNameAsBytes(); final int countPerFamily=1000; Set familyNames=new HashSet(); for ( HColumnDescriptor hcd : htd.getFamilies()) { addWALEdits(tableName,hri,rowName,hcd.getName(),countPerFamily,ee,wal,htd,mvcc); familyNames.add(hcd.getName()); } wal.startCacheFlush(regionName,familyNames); wal.completeCacheFlush(regionName); WALEdit edit=new WALEdit(); long now=ee.currentTime(); edit.add(new KeyValue(rowName,Bytes.toBytes("another family"),rowName,now,rowName)); wal.append(htd,hri,new WALKey(hri.getEncodedNameAsBytes(),tableName,now,mvcc),edit,true); edit=new WALEdit(); now=ee.currentTime(); edit.add(new KeyValue(rowName,Bytes.toBytes("c"),null,now,KeyValue.Type.DeleteFamily)); wal.append(htd,hri,new WALKey(hri.getEncodedNameAsBytes(),tableName,now,mvcc),edit,true); wal.sync(); final Configuration newConf=HBaseConfiguration.create(this.conf); User user=HBaseTestingUtility.getDifferentUser(newConf,".replay.wal.secondtime"); user.runAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { runWALSplit(newConf); FileSystem newFS=FileSystem.get(newConf); newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,1024 * 100); WAL newWal=createWAL(newConf); final 
AtomicInteger flushcount=new AtomicInteger(0); try { final HRegion region=new HRegion(basedir,newWal,newFS,newConf,hri,htd,null){ @Override protected FlushResult internalFlushcache( final WAL wal, final long myseqid, final Collection storesToFlush, MonitoredTask status, boolean writeFlushWalMarker) throws IOException { LOG.info("InternalFlushCache Invoked"); FlushResult fs=super.internalFlushcache(wal,myseqid,storesToFlush,Mockito.mock(MonitoredTask.class),writeFlushWalMarker); flushcount.incrementAndGet(); return fs; } } ; long seqid=region.initialize(); long writePoint=mvcc.getWritePoint(); assertTrue("Flushcount=" + flushcount.get(),flushcount.get() > 0); assertTrue((seqid - 1) == writePoint); Get get=new Get(rowName); Result result=region.get(get); assertEquals(countPerFamily * (htd.getFamilies().size() - 1),result.size()); region.close(); } finally { newWal.close(); } return null; } } ); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): Builds a region from one bulk-loaded HFile spanning ""..."z" with 10
// rows plus one Put (11 rows total = rowsInsertedCount), syncs the WAL, then splits
// the WAL as a different user and reopens the region to verify all 11 rows survive
// replay and that the reopened region has a valid open sequence number (seqid2 > -1).
/** * Test case of HRegion that is only made out of bulk loaded files. Assert * that we don't 'crash'. * @throws IOException * @throws IllegalAccessException * @throws NoSuchFieldException * @throws IllegalArgumentException * @throws SecurityException */ @Test public void testRegionMadeOfBulkLoadedFilesOnly() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { final TableName tableName=TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly"); final HRegionInfo hri=createBasic3FamilyHRegionInfo(tableName); final Path basedir=new Path(this.hbaseRootDir,tableName.getNameAsString()); deleteDir(basedir); final HTableDescriptor htd=createBasic3FamilyHTD(tableName); Region region2=HBaseTestingUtility.createRegionAndWAL(hri,hbaseRootDir,this.conf,htd); HBaseTestingUtility.closeRegionAndWAL(region2); WAL wal=createWAL(this.conf); Region region=HRegion.openHRegion(hri,htd,wal,this.conf); byte[] family=htd.getFamilies().iterator().next().getName(); Path f=new Path(basedir,"hfile"); HFileTestUtil.createHFile(this.conf,fs,f,family,family,Bytes.toBytes(""),Bytes.toBytes("z"),10); List> hfs=new ArrayList>(1); hfs.add(Pair.newPair(family,f.toString())); region.bulkLoadHFiles(hfs,true,null); byte[] row=tableName.getName(); region.put((new Put(row)).addColumn(family,family,family)); wal.sync(); final int rowsInsertedCount=11; assertEquals(rowsInsertedCount,getScannedCount(region.getScanner(new Scan()))); final Configuration newConf=HBaseConfiguration.create(this.conf); User user=HBaseTestingUtility.getDifferentUser(newConf,tableName.getNameAsString()); user.runAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { runWALSplit(newConf); WAL wal2=createWAL(newConf); HRegion region2=HRegion.openHRegion(newConf,FileSystem.get(newConf),hbaseRootDir,hri,htd,wal2); long seqid2=region2.getOpenSeqNum(); assertTrue(seqid2 > -1); 
assertEquals(rowsInsertedCount,getScannedCount(region2.getScanner(new Scan()))); region2.close(); wal2.close(); return null; } } ); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// NOTE(review): Writes 10 rows, injects a flush failure via CustomStoreFlusher
// (throwExceptionWhenFlushing), marks the mocked RegionServerServices aborted and
// calls region.setClosing(false) — presumably to reset the closing state the failed
// flush left behind, confirm. Writes 10 more rows, allows flushing again (a flush
// while the server reports stopped may still throw IOException, which is tolerated),
// closes the region, splits the WAL, and reopens to assert all 20 rows recovered.
/** * Test that we could recover the data correctly after aborting flush. In the * test, first we abort flush after writing some data, then writing more data * and flush again, at last verify the data. * @throws IOException */ @Test public void testReplayEditsAfterAbortingFlush() throws IOException { final TableName tableName=TableName.valueOf("testReplayEditsAfterAbortingFlush"); final HRegionInfo hri=createBasic3FamilyHRegionInfo(tableName); final Path basedir=FSUtils.getTableDir(this.hbaseRootDir,tableName); deleteDir(basedir); final HTableDescriptor htd=createBasic3FamilyHTD(tableName); HRegion region3=HBaseTestingUtility.createRegionAndWAL(hri,hbaseRootDir,this.conf,htd); HBaseTestingUtility.closeRegionAndWAL(region3); WAL wal=createWAL(this.conf); RegionServerServices rsServices=Mockito.mock(RegionServerServices.class); Mockito.doReturn(false).when(rsServices).isAborted(); when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo",10,10)); Configuration customConf=new Configuration(this.conf); customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,CustomStoreFlusher.class.getName()); HRegion region=HRegion.openHRegion(this.hbaseRootDir,hri,htd,wal,customConf,rsServices,null); int writtenRowCount=10; List families=new ArrayList(htd.getFamilies()); for (int i=0; i < writtenRowCount; i++) { Put put=new Put(Bytes.toBytes(tableName + Integer.toString(i))); put.addColumn(families.get(i % families.size()).getName(),Bytes.toBytes("q"),Bytes.toBytes("val")); region.put(put); } RegionScanner scanner=region.getScanner(new Scan()); assertEquals(writtenRowCount,getScannedCount(scanner)); CustomStoreFlusher.throwExceptionWhenFlushing.set(true); try { region.flush(true); fail("Injected exception hasn't been thrown"); } catch ( Throwable t) { LOG.info("Expected simulated exception when flushing region," + t.getMessage()); Mockito.doReturn(true).when(rsServices).isAborted(); region.setClosing(false); } int moreRow=10; for (int i=writtenRowCount; i < 
writtenRowCount + moreRow; i++) { Put put=new Put(Bytes.toBytes(tableName + Integer.toString(i))); put.addColumn(families.get(i % families.size()).getName(),Bytes.toBytes("q"),Bytes.toBytes("val")); region.put(put); } writtenRowCount+=moreRow; CustomStoreFlusher.throwExceptionWhenFlushing.set(false); try { region.flush(true); } catch ( IOException t) { LOG.info("Expected exception when flushing region because server is stopped," + t.getMessage()); } region.close(true); wal.shutdown(); runWALSplit(this.conf); WAL wal2=createWAL(this.conf); Mockito.doReturn(false).when(rsServices).isAborted(); HRegion region2=HRegion.openHRegion(this.hbaseRootDir,hri,htd,wal2,this.conf,rsServices,null); scanner=region2.getScanner(new Scan()); assertEquals(writtenRowCount,getScannedCount(scanner)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): Simulates a partial flush: after a full region flush + close, the
// second column family's store files are deleted (cf_count == 2 branch), so that
// family's edits must come back from the split WAL when the region is reopened.
// Asserts the reopened region returns the same row content and that its open
// sequence number advanced past seqid + result.size().
/** * Test that we recover correctly when there is a failure in between the * flushes. i.e. Some stores got flushed but others did not. * Unfortunately, there is no easy hook to flush at a store level. The way * we get around this is by flushing at the region level, and then deleting * the recently flushed store file for one of the Stores. This would put us * back in the situation where all but that store got flushed and the region * died. * We restart Region again, and verify that the edits were replayed. * @throws IOException * @throws IllegalAccessException * @throws NoSuchFieldException * @throws IllegalArgumentException * @throws SecurityException */ @Test public void testReplayEditsAfterPartialFlush() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { final TableName tableName=TableName.valueOf("testReplayEditsWrittenViaHRegion"); final HRegionInfo hri=createBasic3FamilyHRegionInfo(tableName); final Path basedir=FSUtils.getTableDir(this.hbaseRootDir,tableName); deleteDir(basedir); final byte[] rowName=tableName.getName(); final int countPerFamily=10; final HTableDescriptor htd=createBasic3FamilyHTD(tableName); HRegion region3=HBaseTestingUtility.createRegionAndWAL(hri,hbaseRootDir,this.conf,htd); HBaseTestingUtility.closeRegionAndWAL(region3); WAL wal=createWAL(this.conf); HRegion region=HRegion.openHRegion(this.conf,this.fs,hbaseRootDir,hri,htd,wal); long seqid=region.getOpenSeqNum(); for ( HColumnDescriptor hcd : htd.getFamilies()) { addRegionEdits(rowName,hcd.getName(),countPerFamily,this.ee,region,"x"); } final Get g=new Get(rowName); Result result=region.get(g); assertEquals(countPerFamily * htd.getFamilies().size(),result.size()); region.flush(true); region.close(true); wal.shutdown(); int cf_count=0; for ( HColumnDescriptor hcd : htd.getFamilies()) { cf_count++; if (cf_count == 2) { region.getRegionFileSystem().deleteFamily(hcd.getNameAsString()); } } 
runWALSplit(this.conf); WAL wal2=createWAL(this.conf); HRegion region2=HRegion.openHRegion(this.conf,this.fs,hbaseRootDir,hri,htd,wal2); long seqid2=region2.getOpenSeqNum(); assertTrue(seqid + result.size() < seqid2); final Result result1b=region2.get(g); assertEquals(result.size(),result1b.size()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): HBASE-2727 regression test — appends 1000 edits per family into
// wal1 and splits, appends the same count into wal2 and splits again, then opens
// the region over wal3 and asserts the open sequence number is exactly one past the
// mvcc write point (seqid > writePoint and seqid - 1 == writePoint), i.e. replay
// accounted for every appended edit exactly once.
/** * Tests for hbase-2727. * @throws Exception * @see HBASE-2727 */ @Test public void test2727() throws Exception { final TableName tableName=TableName.valueOf("test2727"); MultiVersionConcurrencyControl mvcc=new MultiVersionConcurrencyControl(); HRegionInfo hri=createBasic3FamilyHRegionInfo(tableName); Path basedir=FSUtils.getTableDir(hbaseRootDir,tableName); deleteDir(basedir); HTableDescriptor htd=createBasic3FamilyHTD(tableName); Region region2=HBaseTestingUtility.createRegionAndWAL(hri,hbaseRootDir,this.conf,htd); HBaseTestingUtility.closeRegionAndWAL(region2); final byte[] rowName=tableName.getName(); WAL wal1=createWAL(this.conf); final int countPerFamily=1000; for ( HColumnDescriptor hcd : htd.getFamilies()) { addWALEdits(tableName,hri,rowName,hcd.getName(),countPerFamily,ee,wal1,htd,mvcc); } wal1.shutdown(); runWALSplit(this.conf); WAL wal2=createWAL(this.conf); for ( HColumnDescriptor hcd : htd.getFamilies()) { addWALEdits(tableName,hri,rowName,hcd.getName(),countPerFamily,ee,wal2,htd,mvcc); } wal2.shutdown(); runWALSplit(this.conf); WAL wal3=createWAL(this.conf); try { HRegion region=HRegion.openHRegion(this.conf,this.fs,hbaseRootDir,hri,htd,wal3); long seqid=region.getOpenSeqNum(); assertTrue(seqid > mvcc.getWritePoint()); assertEquals(seqid - 1,mvcc.getWritePoint()); LOG.debug("region.getOpenSeqNum(): " + region.getOpenSeqNum() + ", wal3.id: "+ mvcc.getReadPoint()); region.close(); } finally { wal3.close(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): Writes 10 edits per family via HRegion (flushing after the first
// family only), closes, splits the WAL and reopens to verify row contents and that
// the open seqid advanced; writes a second batch ("y"), then as a different user
// replays once more through an anonymous HRegion whose restoreEdit override counts
// restored cells. The final assert expects families * countPerFamily restored edits
// — i.e. only the unflushed "y" batch is replayed — consistent with the earlier
// per-family flush of the "x" batch.
/** * Test writing edits into an HRegion, closing it, splitting logs, opening * Region again. Verify seqids. * @throws IOException * @throws IllegalAccessException * @throws NoSuchFieldException * @throws IllegalArgumentException * @throws SecurityException */ @Test public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { final TableName tableName=TableName.valueOf("testReplayEditsWrittenViaHRegion"); final HRegionInfo hri=createBasic3FamilyHRegionInfo(tableName); final Path basedir=FSUtils.getTableDir(this.hbaseRootDir,tableName); deleteDir(basedir); final byte[] rowName=tableName.getName(); final int countPerFamily=10; final HTableDescriptor htd=createBasic3FamilyHTD(tableName); HRegion region3=HBaseTestingUtility.createRegionAndWAL(hri,hbaseRootDir,this.conf,htd); HBaseTestingUtility.closeRegionAndWAL(region3); WAL wal=createWAL(this.conf); HRegion region=HRegion.openHRegion(this.conf,this.fs,hbaseRootDir,hri,htd,wal); long seqid=region.getOpenSeqNum(); boolean first=true; for ( HColumnDescriptor hcd : htd.getFamilies()) { addRegionEdits(rowName,hcd.getName(),countPerFamily,this.ee,region,"x"); if (first) { region.flush(true); first=false; } } final Get g=new Get(rowName); Result result=region.get(g); assertEquals(countPerFamily * htd.getFamilies().size(),result.size()); region.close(true); wal.shutdown(); runWALSplit(this.conf); WAL wal2=createWAL(this.conf); HRegion region2=HRegion.openHRegion(conf,this.fs,hbaseRootDir,hri,htd,wal2); long seqid2=region2.getOpenSeqNum(); assertTrue(seqid + result.size() < seqid2); final Result result1b=region2.get(g); assertEquals(result.size(),result1b.size()); for ( HColumnDescriptor hcd : htd.getFamilies()) { addRegionEdits(rowName,hcd.getName(),countPerFamily,this.ee,region2,"y"); } final Result result2=region2.get(g); assertEquals(2 * result.size(),result2.size()); wal2.sync(); final Configuration 
newConf=HBaseConfiguration.create(this.conf); User user=HBaseTestingUtility.getDifferentUser(newConf,tableName.getNameAsString()); user.runAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { runWALSplit(newConf); FileSystem newFS=FileSystem.get(newConf); WAL wal3=createWAL(newConf); final AtomicInteger countOfRestoredEdits=new AtomicInteger(0); HRegion region3=new HRegion(basedir,wal3,newFS,newConf,hri,htd,null){ @Override protected boolean restoreEdit( Store s, Cell cell){ boolean b=super.restoreEdit(s,cell); countOfRestoredEdits.incrementAndGet(); return b; } } ; long seqid3=region3.initialize(); Result result3=region3.get(g); assertEquals(result2.size(),result3.size()); assertEquals(htd.getFamilies().size() * countPerFamily,countOfRestoredEdits.get()); region3.close(); wal3.close(); return null; } } ); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): One memstore Put plus three bulk-loaded HFiles of 10 rows each
// (rowsInsertedCount = 31), verified before and after a major compaction, then the
// WAL is split and the region reopened as a different user; replay must not lose
// the memstore row after compaction of bulk-loaded files (HBASE-10958).
/** * HRegion test case that is made of a major compacted HFile (created with three bulk loaded * files) and an edit in the memstore. * This is for HBASE-10958 "[dataloss] Bulk loading with seqids can prevent some log entries * from being replayed" * @throws IOException * @throws IllegalAccessException * @throws NoSuchFieldException * @throws IllegalArgumentException * @throws SecurityException */ @Test public void testCompactedBulkLoadedFiles() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { final TableName tableName=TableName.valueOf("testCompactedBulkLoadedFiles"); final HRegionInfo hri=createBasic3FamilyHRegionInfo(tableName); final Path basedir=new Path(this.hbaseRootDir,tableName.getNameAsString()); deleteDir(basedir); final HTableDescriptor htd=createBasic3FamilyHTD(tableName); HRegion region2=HBaseTestingUtility.createRegionAndWAL(hri,hbaseRootDir,this.conf,htd); HBaseTestingUtility.closeRegionAndWAL(region2); WAL wal=createWAL(this.conf); HRegion region=HRegion.openHRegion(hri,htd,wal,this.conf); byte[] row=tableName.getName(); byte[] family=htd.getFamilies().iterator().next().getName(); region.put((new Put(row)).addColumn(family,family,family)); wal.sync(); List> hfs=new ArrayList>(1); for (int i=0; i < 3; i++) { Path f=new Path(basedir,"hfile" + i); HFileTestUtil.createHFile(this.conf,fs,f,family,family,Bytes.toBytes(i + "00"),Bytes.toBytes(i + "50"),10); hfs.add(Pair.newPair(family,f.toString())); } region.bulkLoadHFiles(hfs,true,null); final int rowsInsertedCount=31; assertEquals(rowsInsertedCount,getScannedCount(region.getScanner(new Scan()))); region.compact(true); assertEquals(rowsInsertedCount,getScannedCount(region.getScanner(new Scan()))); final Configuration newConf=HBaseConfiguration.create(this.conf); User user=HBaseTestingUtility.getDifferentUser(newConf,tableName.getNameAsString()); user.runAs(new PrivilegedExceptionAction(){ @Override public Object run() 
throws Exception { runWALSplit(newConf); WAL wal2=createWAL(newConf); HRegion region2=HRegion.openHRegion(newConf,FileSystem.get(newConf),hbaseRootDir,hri,htd,wal2); long seqid2=region2.getOpenSeqNum(); assertTrue(seqid2 > -1); assertEquals(rowsInsertedCount,getScannedCount(region2.getScanner(new Scan()))); region2.close(); wal2.close(); return null; } } ); }

Class: org.apache.hadoop.hbase.replication.TestReplicationChangingPeerRegionservers

InternalCallVerifier EqualityVerifier 
@Test(timeout=300000) public void testChangingNumberOfPeerRegionServers() throws IOException, InterruptedException { LOG.info("testSimplePutDelete"); MiniHBaseCluster peerCluster=utility2.getMiniHBaseCluster(); int numRS=peerCluster.getRegionServerThreads().size(); doPutTest(Bytes.toBytes(1)); int rsToStop=peerCluster.getServerWithMeta() == 0 ? 1 : 0; peerCluster.stopRegionServer(rsToStop); peerCluster.waitOnRegionServer(rsToStop); assertEquals(numRS - 1,peerCluster.getRegionServerThreads().size()); doPutTest(Bytes.toBytes(2)); peerCluster.startRegionServer(); assertEquals(numRS,peerCluster.getRegionServerThreads().size()); doPutTest(Bytes.toBytes(3)); }

Class: org.apache.hadoop.hbase.replication.TestReplicationDisableInactivePeer

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * Test disabling an inactive peer. Add a peer which is inactive, trying to * insert, disable the peer, then activate the peer and make sure nothing is * replicated. In Addition, enable the peer and check the updates are * replicated. * @throws Exception */ @Test(timeout=600000) public void testDisableInactivePeer() throws Exception { admin.enablePeer("2"); utility2.shutdownMiniHBaseCluster(); byte[] rowkey=Bytes.toBytes("disable inactive peer"); Put put=new Put(rowkey); put.addColumn(famName,row,row); htable1.put(put); Thread.sleep(SLEEP_TIME * NB_RETRIES); admin.disablePeer("2"); utility2.startMiniHBaseCluster(1,2); Get get=new Get(rowkey); for (int i=0; i < NB_RETRIES; i++) { Result res=htable2.get(get); if (res.size() >= 1) { fail("Replication wasn't disabled"); } else { LOG.info("Row not replicated, let's wait a bit more..."); Thread.sleep(SLEEP_TIME); } } admin.enablePeer("2"); Thread.sleep(SLEEP_TIME * NB_RETRIES); for (int i=0; i < NB_RETRIES; i++) { Result res=htable2.get(get); if (res.size() == 0) { LOG.info("Row not available"); Thread.sleep(SLEEP_TIME * NB_RETRIES); } else { assertArrayEquals(res.value(),row); return; } } fail("Waited too much time for put replication"); }

Class: org.apache.hadoop.hbase.replication.TestReplicationSmallTests

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * Integration test for TestReplicationAdmin, removes and re-add a peer * cluster * @throws Exception */ @Test(timeout=300000) public void testAddAndRemoveClusters() throws Exception { LOG.info("testAddAndRemoveClusters"); admin.removePeer("2"); Thread.sleep(SLEEP_TIME); byte[] rowKey=Bytes.toBytes("Won't be replicated"); Put put=new Put(rowKey); put.addColumn(famName,row,row); htable1.put(put); Get get=new Get(rowKey); for (int i=0; i < NB_RETRIES; i++) { if (i == NB_RETRIES - 1) { break; } Result res=htable2.get(get); if (res.size() >= 1) { fail("Not supposed to be replicated"); } else { LOG.info("Row not replicated, let's wait a bit more..."); Thread.sleep(SLEEP_TIME); } } admin.addPeer("2",utility2.getClusterKey()); Thread.sleep(SLEEP_TIME); rowKey=Bytes.toBytes("do rep"); put=new Put(rowKey); put.addColumn(famName,row,row); LOG.info("Adding new row"); htable1.put(put); get=new Get(rowKey); for (int i=0; i < NB_RETRIES; i++) { if (i == NB_RETRIES - 1) { fail("Waited too much time for put replication"); } Result res=htable2.get(get); if (res.size() == 0) { LOG.info("Row not available"); Thread.sleep(SLEEP_TIME * i); } else { assertArrayEquals(res.value(),row); break; } } }

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * Verify that version and column delete marker types are replicated * correctly. * @throws Exception */ @Test(timeout=300000) public void testDeleteTypes() throws Exception { LOG.info("testDeleteTypes"); final byte[] v1=Bytes.toBytes("v1"); final byte[] v2=Bytes.toBytes("v2"); final byte[] v3=Bytes.toBytes("v3"); htable1=utility1.getConnection().getTable(tableName); long t=EnvironmentEdgeManager.currentTime(); Put put=new Put(row); put.addColumn(famName,row,t,v1); htable1.put(put); put=new Put(row); put.addColumn(famName,row,t + 1,v2); htable1.put(put); put=new Put(row); put.addColumn(famName,row,t + 2,v3); htable1.put(put); Get get=new Get(row); get.setMaxVersions(); for (int i=0; i < NB_RETRIES; i++) { if (i == NB_RETRIES - 1) { fail("Waited too much time for put replication"); } Result res=htable2.get(get); if (res.size() < 3) { LOG.info("Rows not available"); Thread.sleep(SLEEP_TIME); } else { assertArrayEquals(CellUtil.cloneValue(res.rawCells()[0]),v3); assertArrayEquals(CellUtil.cloneValue(res.rawCells()[1]),v2); assertArrayEquals(CellUtil.cloneValue(res.rawCells()[2]),v1); break; } } Delete d=new Delete(row); d.addColumn(famName,row,t); htable1.delete(d); get=new Get(row); get.setMaxVersions(); for (int i=0; i < NB_RETRIES; i++) { if (i == NB_RETRIES - 1) { fail("Waited too much time for put replication"); } Result res=htable2.get(get); if (res.size() > 2) { LOG.info("Version not deleted"); Thread.sleep(SLEEP_TIME); } else { assertArrayEquals(CellUtil.cloneValue(res.rawCells()[0]),v3); assertArrayEquals(CellUtil.cloneValue(res.rawCells()[1]),v2); break; } } d=new Delete(row); d.addColumns(famName,row,t + 2); htable1.delete(d); get=new Get(row); for (int i=0; i < NB_RETRIES; i++) { if (i == NB_RETRIES - 1) { fail("Waited too much time for del replication"); } Result res=htable2.get(get); if (res.size() >= 1) { LOG.info("Rows not deleted"); Thread.sleep(SLEEP_TIME); } else { break; } } }

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * Do a more intense version testSmallBatch, one that will trigger * wal rolling and other non-trivial code paths * @throws Exception */ @Test(timeout=300000) public void testLoading() throws Exception { LOG.info("Writing out rows to table1 in testLoading"); List puts=new ArrayList(); for (int i=0; i < NB_ROWS_IN_BIG_BATCH; i++) { Put put=new Put(Bytes.toBytes(i)); put.addColumn(famName,row,row); puts.add(put); } htable1.setWriteBufferSize(1024); htable1.put(puts); Scan scan=new Scan(); ResultScanner scanner=htable1.getScanner(scan); Result[] res=scanner.next(NB_ROWS_IN_BIG_BATCH); scanner.close(); assertEquals(NB_ROWS_IN_BIG_BATCH,res.length); LOG.info("Looking in table2 for replicated rows in testLoading"); long start=System.currentTimeMillis(); final long retries=NB_RETRIES * 10; for (int i=0; i < retries; i++) { scan=new Scan(); scanner=htable2.getScanner(scan); res=scanner.next(NB_ROWS_IN_BIG_BATCH); scanner.close(); if (res.length != NB_ROWS_IN_BIG_BATCH) { if (i == retries - 1) { int lastRow=-1; for ( Result result : res) { int currentRow=Bytes.toInt(result.getRow()); for (int row=lastRow + 1; row < currentRow; row++) { LOG.error("Row missing: " + row); } lastRow=currentRow; } LOG.error("Last row: " + lastRow); fail("Waited too much time for normal batch replication, " + res.length + " instead of "+ NB_ROWS_IN_BIG_BATCH+ "; waited="+ (System.currentTimeMillis() - start)+ "ms"); } else { LOG.info("Only got " + res.length + " rows... retrying"); Thread.sleep(SLEEP_TIME); } } else { break; } } }

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * Test disable/enable replication, trying to insert, make sure nothing's * replicated, enable it, the insert should be replicated * @throws Exception */ @Test(timeout=300000) public void testDisableEnable() throws Exception { admin.disablePeer("2"); byte[] rowkey=Bytes.toBytes("disable enable"); Put put=new Put(rowkey); put.addColumn(famName,row,row); htable1.put(put); Get get=new Get(rowkey); for (int i=0; i < NB_RETRIES; i++) { Result res=htable2.get(get); if (res.size() >= 1) { fail("Replication wasn't disabled"); } else { LOG.info("Row not replicated, let's wait a bit more..."); Thread.sleep(SLEEP_TIME); } } admin.enablePeer("2"); for (int i=0; i < NB_RETRIES; i++) { Result res=htable2.get(get); if (res.size() == 0) { LOG.info("Row not available"); Thread.sleep(SLEEP_TIME); } else { assertArrayEquals(res.value(),row); return; } } fail("Waited too much time for put replication"); }

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * Do a small loading into a table, make sure the data is really the same, * then run the VerifyReplication job to check the results. Do a second * comparison where all the cells are different. * @throws Exception */ @Test(timeout=300000) public void testVerifyRepJob() throws Exception { testSmallBatch(); String[] args=new String[]{"2",tableName.getNameAsString()}; Job job=VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS,args); if (job == null) { fail("Job wasn't created, see the log"); } if (!job.waitForCompletion(true)) { fail("Job failed, see the log"); } assertEquals(NB_ROWS_IN_BATCH,job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); assertEquals(0,job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); Scan scan=new Scan(); ResultScanner rs=htable2.getScanner(scan); Put put=null; for ( Result result : rs) { put=new Put(result.getRow()); Cell firstVal=result.rawCells()[0]; put.addColumn(CellUtil.cloneFamily(firstVal),CellUtil.cloneQualifier(firstVal),Bytes.toBytes("diff data")); htable2.put(put); } Delete delete=new Delete(put.getRow()); htable2.delete(delete); job=VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS,args); if (job == null) { fail("Job wasn't created, see the log"); } if (!job.waitForCompletion(true)) { fail("Job failed, see the log"); } assertEquals(0,job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); assertEquals(NB_ROWS_IN_BATCH,job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); }

InternalCallVerifier BooleanVerifier 
/** * Test for HBASE-9531 * put a few rows into htable1, which should be replicated to htable2 * create a ClusterStatus instance 'status' from HBaseAdmin * test : status.getLoad(server).getReplicationLoadSourceList() * test : status.getLoad(server).getReplicationLoadSink() * * @throws Exception */ @Test(timeout=300000) public void testReplicationStatus() throws Exception { LOG.info("testReplicationStatus"); try (Admin admin=utility1.getConnection().getAdmin()){ final byte[] qualName=Bytes.toBytes("q"); Put p; for (int i=0; i < NB_ROWS_IN_BATCH; i++) { p=new Put(Bytes.toBytes("row" + i)); p.addColumn(famName,qualName,Bytes.toBytes("val" + i)); htable1.put(p); } ClusterStatus status=admin.getClusterStatus(); for ( ServerName server : status.getServers()) { ServerLoad sl=status.getLoad(server); List rLoadSourceList=sl.getReplicationLoadSourceList(); ReplicationLoadSink rLoadSink=sl.getReplicationLoadSink(); assertTrue("failed to get ReplicationLoadSourceList",(rLoadSourceList.size() > 0)); assertTrue("failed to get ReplicationLoadSink.AgeOfLastShippedOp ",(rLoadSink.getAgeOfLastAppliedOp() >= 0)); assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp ",(rLoadSink.getTimeStampsOfLastAppliedOp() >= 0)); } } }

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * Try a small batch upload using the write buffer, check it's replicated * @throws Exception */ @Test(timeout=300000) public void testSmallBatch() throws Exception { LOG.info("testSmallBatch"); List puts=new ArrayList<>(); for (int i=0; i < NB_ROWS_IN_BATCH; i++) { Put put=new Put(Bytes.toBytes(i)); put.addColumn(famName,row,row); puts.add(put); } htable1.put(puts); Scan scan=new Scan(); ResultScanner scanner1=htable1.getScanner(scan); Result[] res1=scanner1.next(NB_ROWS_IN_BATCH); scanner1.close(); assertEquals(NB_ROWS_IN_BATCH,res1.length); for (int i=0; i < NB_RETRIES; i++) { scan=new Scan(); if (i == NB_RETRIES - 1) { fail("Waited too much time for normal batch replication"); } ResultScanner scanner=htable2.getScanner(scan); Result[] res=scanner.next(NB_ROWS_IN_BATCH); scanner.close(); if (res.length != NB_ROWS_IN_BATCH) { LOG.info("Only got " + res.length + " rows"); Thread.sleep(SLEEP_TIME); } else { break; } } }

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * Add a row, check it's replicated, delete it, check's gone * @throws Exception */ @Test(timeout=300000) public void testSimplePutDelete() throws Exception { LOG.info("testSimplePutDelete"); Put put=new Put(row); put.addColumn(famName,row,row); htable1=utility1.getConnection().getTable(tableName); htable1.put(put); Get get=new Get(row); for (int i=0; i < NB_RETRIES; i++) { if (i == NB_RETRIES - 1) { fail("Waited too much time for put replication"); } Result res=htable2.get(get); if (res.size() == 0) { LOG.info("Row not available"); Thread.sleep(SLEEP_TIME); } else { assertArrayEquals(res.value(),row); break; } } Delete del=new Delete(row); htable1.delete(del); get=new Get(row); for (int i=0; i < NB_RETRIES; i++) { if (i == NB_RETRIES - 1) { fail("Waited too much time for del replication"); } Result res=htable2.get(get); if (res.size() >= 1) { LOG.info("Row not deleted"); Thread.sleep(SLEEP_TIME); } else { break; } } }

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// HBASE-14905 (explicit-timestamp variant): write three timestamped versions on
// the source, wait until all three replicate; then, with the peer disabled,
// overwrite the middle version (ts+2 -> "v99") directly on the sink and run
// VerifyReplication with --versions=100, expecting 0 GOODROWS / 1 BADROWS.
// The peer is re-enabled in the finally block regardless of outcome.
// NOTE(review): code left byte-identical; only commentary added.
@Test(timeout=300000) public void testVersionMismatchHBase14905() throws Exception { byte[] qualifierName=Bytes.toBytes("f1"); Put put=new Put(Bytes.toBytes("r1")); long ts=System.currentTimeMillis(); put.addColumn(famName,qualifierName,ts + 1,Bytes.toBytes("v1")); htable1.put(put); put.addColumn(famName,qualifierName,ts + 2,Bytes.toBytes("v2")); htable1.put(put); put.addColumn(famName,qualifierName,ts + 3,Bytes.toBytes("v3")); htable1.put(put); Scan scan=new Scan(); scan.setMaxVersions(100); ResultScanner scanner1=htable1.getScanner(scan); Result[] res1=scanner1.next(1); scanner1.close(); assertEquals(1,res1.length); assertEquals(3,res1[0].getColumnCells(famName,qualifierName).size()); for (int i=0; i < NB_RETRIES; i++) { scan=new Scan(); scan.setMaxVersions(100); scanner1=htable2.getScanner(scan); res1=scanner1.next(1); scanner1.close(); if (res1.length != 1) { LOG.info("Only got " + res1.length + " rows"); Thread.sleep(SLEEP_TIME); } else { int cellNumber=res1[0].getColumnCells(famName,Bytes.toBytes("f1")).size(); if (cellNumber != 3) { LOG.info("Only got " + cellNumber + " cells"); Thread.sleep(SLEEP_TIME); } else { break; } } if (i == NB_RETRIES - 1) { fail("Waited too much time for normal batch replication"); } } try { admin.disablePeer("2"); Put put2=new Put(Bytes.toBytes("r1")); put2.addColumn(famName,qualifierName,ts + 2,Bytes.toBytes("v99")); htable2.put(put2); scan=new Scan(); scan.setMaxVersions(100); scanner1=htable2.getScanner(scan); res1=scanner1.next(NB_ROWS_IN_BATCH); scanner1.close(); assertEquals(1,res1.length); assertEquals(3,res1[0].getColumnCells(famName,qualifierName).size()); String[] args=new String[]{"--versions=100","2",tableName.getNameAsString()}; Job job=VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS,args); if (job == null) { fail("Job wasn't created, see the log"); } if (!job.waitForCompletion(true)) { fail("Job failed, see the log"); } 
assertEquals(0,job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); assertEquals(1,job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); } finally { admin.enablePeer("2"); } }

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// HBASE-14905 (server-timestamp variant): write three versions on the source and
// wait for replication; then add two extra versions directly on the sink (5 cells
// total there) and run VerifyReplication with --versions=100, expecting
// 0 GOODROWS / 1 BADROWS because the version counts differ.
// NOTE(review): code left byte-identical; only commentary added.
@Test(timeout=300000) public void testHBase14905() throws Exception { byte[] qualifierName=Bytes.toBytes("f1"); Put put=new Put(Bytes.toBytes("r1")); put.addColumn(famName,qualifierName,Bytes.toBytes("v1002")); htable1.put(put); put.addColumn(famName,qualifierName,Bytes.toBytes("v1001")); htable1.put(put); put.addColumn(famName,qualifierName,Bytes.toBytes("v1112")); htable1.put(put); Scan scan=new Scan(); scan.setMaxVersions(100); ResultScanner scanner1=htable1.getScanner(scan); Result[] res1=scanner1.next(1); scanner1.close(); assertEquals(1,res1.length); assertEquals(3,res1[0].getColumnCells(famName,qualifierName).size()); for (int i=0; i < NB_RETRIES; i++) { scan=new Scan(); scan.setMaxVersions(100); scanner1=htable2.getScanner(scan); res1=scanner1.next(1); scanner1.close(); if (res1.length != 1) { LOG.info("Only got " + res1.length + " rows"); Thread.sleep(SLEEP_TIME); } else { int cellNumber=res1[0].getColumnCells(famName,Bytes.toBytes("f1")).size(); if (cellNumber != 3) { LOG.info("Only got " + cellNumber + " cells"); Thread.sleep(SLEEP_TIME); } else { break; } } if (i == NB_RETRIES - 1) { fail("Waited too much time for normal batch replication"); } } put.addColumn(famName,qualifierName,Bytes.toBytes("v1111")); htable2.put(put); put.addColumn(famName,qualifierName,Bytes.toBytes("v1112")); htable2.put(put); scan=new Scan(); scan.setMaxVersions(100); scanner1=htable2.getScanner(scan); res1=scanner1.next(NB_ROWS_IN_BATCH); scanner1.close(); assertEquals(1,res1.length); assertEquals(5,res1[0].getColumnCells(famName,qualifierName).size()); String[] args=new String[]{"--versions=100","2",tableName.getNameAsString()}; Job job=VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS,args); if (job == null) { fail("Job wasn't created, see the log"); } if (!job.waitForCompletion(true)) { fail("Job failed, see the log"); } assertEquals(0,job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); 
assertEquals(1,job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); }

Class: org.apache.hadoop.hbase.replication.TestReplicationSource

APIUtilityVerifier InternalCallVerifier NullVerifier 
/** * Sanity check that we can move logs around while we are reading * from them. Should this test fail, ReplicationSource would have a hard * time reading logs that are being archived. * @throws Exception */ @Test public void testLogMoving() throws Exception { Path logPath=new Path(logDir,"log"); if (!FS.exists(logDir)) FS.mkdirs(logDir); if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir); WALProvider.Writer writer=WALFactory.createWALWriter(FS,logPath,TEST_UTIL.getConfiguration()); for (int i=0; i < 3; i++) { byte[] b=Bytes.toBytes(Integer.toString(i)); KeyValue kv=new KeyValue(b,b,b); WALEdit edit=new WALEdit(); edit.add(kv); WALKey key=new WALKey(b,TableName.valueOf(b),0,0,HConstants.DEFAULT_CLUSTER_ID); writer.append(new WAL.Entry(key,edit)); writer.sync(); } writer.close(); WAL.Reader reader=WALFactory.createReader(FS,logPath,TEST_UTIL.getConfiguration()); WAL.Entry entry=reader.next(); assertNotNull(entry); Path oldLogPath=new Path(oldLogDir,"log"); FS.rename(logPath,oldLogPath); entry=reader.next(); assertNotNull(entry); entry=reader.next(); entry=reader.next(); assertNull(entry); reader.close(); }

Class: org.apache.hadoop.hbase.replication.TestReplicationStateBasic

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testRemovePeerForHFileRefs() throws ReplicationException, KeeperException { rq1.init(server1); rqc.init(); rp.init(); rp.addPeer(ID_ONE,new ReplicationPeerConfig().setClusterKey(KEY_ONE),null); rp.addPeer(ID_TWO,new ReplicationPeerConfig().setClusterKey(KEY_TWO),null); List files1=new ArrayList(3); files1.add("file_1"); files1.add("file_2"); files1.add("file_3"); rq1.addHFileRefs(ID_ONE,files1); rq1.addHFileRefs(ID_TWO,files1); assertEquals(2,rqc.getAllPeersFromHFileRefsQueue().size()); assertEquals(3,rqc.getReplicableHFiles(ID_ONE).size()); assertEquals(3,rqc.getReplicableHFiles(ID_TWO).size()); rp.removePeer(ID_ONE); assertEquals(1,rqc.getAllPeersFromHFileRefsQueue().size()); assertNull(rqc.getReplicableHFiles(ID_ONE)); assertEquals(3,rqc.getReplicableHFiles(ID_TWO).size()); rp.removePeer(ID_TWO); assertEquals(0,rqc.getAllPeersFromHFileRefsQueue().size()); assertNull(rqc.getReplicableHFiles(ID_TWO)); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testReplicationQueues() throws ReplicationException { rq1.init(server1); rq2.init(server2); rq3.init(server3); rp.init(); assertEquals(3,rq1.getListOfReplicators().size()); rq1.removeQueue("bogus"); rq1.removeLog("bogus","bogus"); rq1.removeAllQueues(); assertNull(rq1.getAllQueues()); assertEquals(0,rq1.getLogPosition("bogus","bogus")); assertNull(rq1.getLogsInQueue("bogus")); assertEquals(0,rq1.claimQueues(ServerName.valueOf("bogus",1234,-1L).toString()).size()); rq1.setLogPosition("bogus","bogus",5L); populateQueues(); assertEquals(3,rq1.getListOfReplicators().size()); assertEquals(0,rq2.getLogsInQueue("qId1").size()); assertEquals(5,rq3.getLogsInQueue("qId5").size()); assertEquals(0,rq3.getLogPosition("qId1","filename0")); rq3.setLogPosition("qId5","filename4",354L); assertEquals(354L,rq3.getLogPosition("qId5","filename4")); assertEquals(5,rq3.getLogsInQueue("qId5").size()); assertEquals(0,rq2.getLogsInQueue("qId1").size()); assertEquals(0,rq1.getAllQueues().size()); assertEquals(1,rq2.getAllQueues().size()); assertEquals(5,rq3.getAllQueues().size()); assertEquals(0,rq3.claimQueues(server1).size()); assertEquals(2,rq3.getListOfReplicators().size()); SortedMap> queues=rq2.claimQueues(server3); assertEquals(5,queues.size()); assertEquals(1,rq2.getListOfReplicators().size()); assertEquals(0,rq2.claimQueues(server2).size()); assertEquals(6,rq2.getAllQueues().size()); rq2.removeAllQueues(); assertEquals(0,rq2.getListOfReplicators().size()); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testHfileRefsReplicationQueues() throws ReplicationException, KeeperException { rp.init(); rq1.init(server1); rqc.init(); List files1=new ArrayList(3); files1.add("file_1"); files1.add("file_2"); files1.add("file_3"); assertNull(rqc.getReplicableHFiles(ID_ONE)); assertEquals(0,rqc.getAllPeersFromHFileRefsQueue().size()); rp.addPeer(ID_ONE,new ReplicationPeerConfig().setClusterKey(KEY_ONE),null); rq1.addHFileRefs(ID_ONE,files1); assertEquals(1,rqc.getAllPeersFromHFileRefsQueue().size()); assertEquals(3,rqc.getReplicableHFiles(ID_ONE).size()); List files2=new ArrayList<>(files1); String removedString=files2.remove(0); rq1.removeHFileRefs(ID_ONE,files2); assertEquals(1,rqc.getReplicableHFiles(ID_ONE).size()); files2=new ArrayList<>(1); files2.add(removedString); rq1.removeHFileRefs(ID_ONE,files2); assertEquals(0,rqc.getReplicableHFiles(ID_ONE).size()); rp.removePeer(ID_ONE); }

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testReplicationPeers() throws Exception { rp.init(); try { rp.removePeer("bogus"); fail("Should have thrown an IllegalArgumentException when passed a bogus peerId"); } catch ( IllegalArgumentException e) { } try { rp.enablePeer("bogus"); fail("Should have thrown an IllegalArgumentException when passed a bogus peerId"); } catch ( IllegalArgumentException e) { } try { rp.disablePeer("bogus"); fail("Should have thrown an IllegalArgumentException when passed a bogus peerId"); } catch ( IllegalArgumentException e) { } try { rp.getStatusOfPeer("bogus"); fail("Should have thrown an IllegalArgumentException when passed a bogus peerId"); } catch ( IllegalArgumentException e) { } assertFalse(rp.peerAdded("bogus")); rp.peerRemoved("bogus"); assertNull(rp.getPeerConf("bogus")); assertNumberOfPeers(0); rp.addPeer(ID_ONE,new ReplicationPeerConfig().setClusterKey(KEY_ONE),null); assertNumberOfPeers(1); rp.addPeer(ID_TWO,new ReplicationPeerConfig().setClusterKey(KEY_TWO),null); assertNumberOfPeers(2); try { rp.getStatusOfPeer(ID_ONE); fail("There are no connected peers, should have thrown an IllegalArgumentException"); } catch ( IllegalArgumentException e) { } assertEquals(KEY_ONE,ZKConfig.getZooKeeperClusterKey(rp.getPeerConf(ID_ONE).getSecond())); rp.removePeer(ID_ONE); rp.peerRemoved(ID_ONE); assertNumberOfPeers(1); rp.addPeer(ID_ONE,new ReplicationPeerConfig().setClusterKey(KEY_ONE),null); rp.peerAdded(ID_ONE); assertNumberOfPeers(2); assertTrue(rp.getStatusOfPeer(ID_ONE)); rp.disablePeer(ID_ONE); assertConnectedPeerStatus(false,ID_ONE); rp.enablePeer(ID_ONE); assertConnectedPeerStatus(true,ID_ONE); rp.peerRemoved(ID_ONE); assertNumberOfPeers(2); try { rp.getStatusOfPeer(ID_ONE); fail("There are no connected peers, should have thrown an IllegalArgumentException"); } catch ( IllegalArgumentException e) { } }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testReplicationQueuesClient() throws ReplicationException, KeeperException { rqc.init(); assertEquals(0,rqc.getListOfReplicators().size()); assertNull(rqc.getLogsInQueue(server1,"qId1")); assertNull(rqc.getAllQueues(server1)); rq1.init(server1); rq2.init(server2); rq1.addLog("qId1","trash"); rq1.removeLog("qId1","trash"); rq1.addLog("qId2","filename1"); rq1.addLog("qId3","filename2"); rq1.addLog("qId3","filename3"); rq2.addLog("trash","trash"); rq2.removeQueue("trash"); List reps=rqc.getListOfReplicators(); assertEquals(2,reps.size()); assertTrue(server1,reps.contains(server1)); assertTrue(server2,reps.contains(server2)); assertNull(rqc.getLogsInQueue("bogus","bogus")); assertNull(rqc.getLogsInQueue(server1,"bogus")); assertEquals(0,rqc.getLogsInQueue(server1,"qId1").size()); assertEquals(1,rqc.getLogsInQueue(server1,"qId2").size()); assertEquals("filename1",rqc.getLogsInQueue(server1,"qId2").get(0)); assertNull(rqc.getAllQueues("bogus")); assertEquals(0,rqc.getAllQueues(server2).size()); List list=rqc.getAllQueues(server1); assertEquals(3,list.size()); assertTrue(list.contains("qId2")); assertTrue(list.contains("qId3")); }

Class: org.apache.hadoop.hbase.replication.TestReplicationTrackerZKImpl

InternalCallVerifier EqualityVerifier 
@Test public void testGetListOfRegionServers() throws Exception { assertEquals(0,rt.getListOfRegionServers().size()); ZKUtil.createWithParents(zkw,ZKUtil.joinZNode(zkw.rsZNode,"hostname1.example.org:1234")); assertEquals(1,rt.getListOfRegionServers().size()); ZKUtil.createWithParents(zkw,ZKUtil.joinZNode(zkw.rsZNode,"hostname2.example.org:1234")); assertEquals(2,rt.getListOfRegionServers().size()); ZKUtil.deleteNode(zkw,ZKUtil.joinZNode(zkw.rsZNode,"hostname2.example.org:1234")); assertEquals(1,rt.getListOfRegionServers().size()); ZKUtil.deleteNode(zkw,ZKUtil.joinZNode(zkw.rsZNode,"hostname1.example.org:1234")); assertEquals(0,rt.getListOfRegionServers().size()); }

Class: org.apache.hadoop.hbase.replication.TestReplicationWALEntryFilters

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testScopeWALEntryFilter(){ ScopeWALEntryFilter filter=new ScopeWALEntryFilter(); Entry userEntry=createEntry(a,b); Entry userEntryA=createEntry(a); Entry userEntryB=createEntry(b); Entry userEntryEmpty=createEntry(); assertEquals(null,filter.filter(userEntry)); TreeMap scopes=new TreeMap(Bytes.BYTES_COMPARATOR); userEntry=createEntry(a,b); userEntry.getKey().setScopes(scopes); assertEquals(null,filter.filter(userEntry)); scopes=new TreeMap(Bytes.BYTES_COMPARATOR); scopes.put(c,HConstants.REPLICATION_SCOPE_GLOBAL); userEntry=createEntry(a,b); userEntry.getKey().setScopes(scopes); assertEquals(userEntryEmpty,filter.filter(userEntry)); scopes=new TreeMap(Bytes.BYTES_COMPARATOR); scopes.put(a,HConstants.REPLICATION_SCOPE_LOCAL); userEntry=createEntry(a,b); userEntry.getKey().setScopes(scopes); assertEquals(userEntryEmpty,filter.filter(userEntry)); scopes.put(b,HConstants.REPLICATION_SCOPE_LOCAL); assertEquals(userEntryEmpty,filter.filter(userEntry)); scopes=new TreeMap(Bytes.BYTES_COMPARATOR); scopes.put(a,HConstants.REPLICATION_SCOPE_GLOBAL); userEntry=createEntry(a,b); userEntry.getKey().setScopes(scopes); assertEquals(userEntryA,filter.filter(userEntry)); scopes.put(b,HConstants.REPLICATION_SCOPE_LOCAL); assertEquals(userEntryA,filter.filter(userEntry)); scopes=new TreeMap(Bytes.BYTES_COMPARATOR); scopes.put(b,HConstants.REPLICATION_SCOPE_GLOBAL); userEntry=createEntry(a,b); userEntry.getKey().setScopes(scopes); assertEquals(userEntryB,filter.filter(userEntry)); scopes.put(a,HConstants.REPLICATION_SCOPE_LOCAL); assertEquals(userEntryB,filter.filter(userEntry)); scopes=new TreeMap(Bytes.BYTES_COMPARATOR); scopes.put(b,HConstants.REPLICATION_SCOPE_GLOBAL); userEntry=createEntry(a,b); userEntry.getKey().setScopes(scopes); assertEquals(userEntryB,filter.filter(userEntry)); scopes.put(a,HConstants.REPLICATION_SCOPE_LOCAL); assertEquals(userEntryB,filter.filter(userEntry)); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testSystemTableWALEntryFilter(){ SystemTableWALEntryFilter filter=new SystemTableWALEntryFilter(); WALKey key1=new WALKey(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),TableName.META_TABLE_NAME); Entry metaEntry=new Entry(key1,null); assertNull(filter.filter(metaEntry)); WALKey key2=new WALKey(new byte[]{},TableName.NAMESPACE_TABLE_NAME); Entry nsEntry=new Entry(key2,null); assertNull(filter.filter(nsEntry)); WALKey key3=new WALKey(new byte[]{},TableName.valueOf("foo")); Entry userEntry=new Entry(key3,null); assertEquals(userEntry,filter.filter(userEntry)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testTableCfWALEntryFilter(){ ReplicationPeer peer=mock(ReplicationPeer.class); when(peer.getTableCFs()).thenReturn(null); Entry userEntry=createEntry(a,b,c); TableCfWALEntryFilter filter=new TableCfWALEntryFilter(peer); assertEquals(createEntry(a,b,c),filter.filter(userEntry)); userEntry=createEntry(a,b,c); Map> tableCfs=new HashMap>(); when(peer.getTableCFs()).thenReturn(tableCfs); filter=new TableCfWALEntryFilter(peer); assertEquals(null,filter.filter(userEntry)); userEntry=createEntry(a,b,c); tableCfs=new HashMap>(); tableCfs.put(TableName.valueOf("bar"),null); when(peer.getTableCFs()).thenReturn(tableCfs); filter=new TableCfWALEntryFilter(peer); assertEquals(null,filter.filter(userEntry)); userEntry=createEntry(a,b,c); tableCfs=new HashMap>(); tableCfs.put(TableName.valueOf("foo"),Lists.newArrayList("a")); when(peer.getTableCFs()).thenReturn(tableCfs); filter=new TableCfWALEntryFilter(peer); assertEquals(createEntry(a),filter.filter(userEntry)); userEntry=createEntry(a,b,c,d); tableCfs=new HashMap>(); tableCfs.put(TableName.valueOf("foo"),Lists.newArrayList("a","c")); when(peer.getTableCFs()).thenReturn(tableCfs); filter=new TableCfWALEntryFilter(peer); assertEquals(createEntry(a,c),filter.filter(userEntry)); }

Class: org.apache.hadoop.hbase.replication.TestReplicationWithTags

IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test(timeout=300000) public void testReplicationWithCellTags() throws Exception { LOG.info("testSimplePutDelete"); Put put=new Put(ROW); put.setAttribute("visibility",Bytes.toBytes("myTag3")); put.addColumn(FAMILY,ROW,ROW); htable1=utility1.getConnection().getTable(TABLE_NAME); htable1.put(put); Get get=new Get(ROW); try { for (int i=0; i < NB_RETRIES; i++) { if (i == NB_RETRIES - 1) { fail("Waited too much time for put replication"); } Result res=htable2.get(get); if (res.size() == 0) { LOG.info("Row not available"); Thread.sleep(SLEEP_TIME); } else { assertArrayEquals(res.value(),ROW); assertEquals(1,TestCoprocessorForTagsAtSink.tags.size()); Tag tag=TestCoprocessorForTagsAtSink.tags.get(0); assertEquals(TAG_TYPE,tag.getType()); break; } } } finally { TestCoprocessorForTagsAtSink.tags=null; } }

Class: org.apache.hadoop.hbase.replication.regionserver.TestRegionReplicaReplicationEndpoint

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Creates a table with the default single replica (no peer expected), then disables it and
// raises region replication to 2 via modifyTable; verifies the "region_replica_replication"
// peer is auto-created with the local cluster key and the region-replica endpoint impl.
@Test(timeout=240000) public void testRegionReplicaReplicationPeerIsCreatedForModifyTable() throws Exception { ReplicationAdmin admin=new ReplicationAdmin(HTU.getConfiguration()); String peerId="region_replica_replication"; if (admin.getPeerConfig(peerId) != null) { admin.removePeer(peerId); } HTableDescriptor htd=HTU.createTableDescriptor("testRegionReplicaReplicationPeerIsCreatedForModifyTable"); HTU.getHBaseAdmin().createTable(htd); ReplicationPeerConfig peerConfig=admin.getPeerConfig(peerId); assertNull(peerConfig); HTU.getHBaseAdmin().disableTable(htd.getTableName()); htd.setRegionReplication(2); HTU.getHBaseAdmin().modifyTable(htd.getTableName(),htd); HTU.getHBaseAdmin().enableTable(htd.getTableName()); peerConfig=admin.getPeerConfig(peerId); assertNotNull(peerConfig); assertEquals(peerConfig.getClusterKey(),ZKConfig.getZooKeeperClusterKey(HTU.getConfiguration())); assertEquals(peerConfig.getReplicationEndpointImpl(),RegionReplicaReplicationEndpoint.class.getName()); admin.close(); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Verifies the "region_replica_replication" peer is NOT created for a table with a single
// replica, but IS created (with local cluster key and region-replica endpoint impl) once a
// table is created with region replication 2.
@Test public void testRegionReplicaReplicationPeerIsCreated() throws IOException, ReplicationException { ReplicationAdmin admin=new ReplicationAdmin(HTU.getConfiguration()); String peerId="region_replica_replication"; if (admin.getPeerConfig(peerId) != null) { admin.removePeer(peerId); } HTableDescriptor htd=HTU.createTableDescriptor("testReplicationPeerIsCreated_no_region_replicas"); HTU.getHBaseAdmin().createTable(htd); ReplicationPeerConfig peerConfig=admin.getPeerConfig(peerId); assertNull(peerConfig); htd=HTU.createTableDescriptor("testReplicationPeerIsCreated"); htd.setRegionReplication(2); HTU.getHBaseAdmin().createTable(htd); peerConfig=admin.getPeerConfig(peerId); assertNotNull(peerConfig); assertEquals(peerConfig.getClusterKey(),ZKConfig.getZooKeeperClusterKey(HTU.getConfiguration())); assertEquals(peerConfig.getReplicationEndpointImpl(),RegionReplicaReplicationEndpoint.class.getName()); admin.close(); }

Class: org.apache.hadoop.hbase.replication.regionserver.TestReplicationSink

InternalCallVerifier EqualityVerifier 
// Replicates BATCH_SIZE/2 puts, then a second batch alternating puts (odd rows) and
// DeleteColumn (even rows, removing the rows written in the first batch); a final scan
// must therefore see exactly BATCH_SIZE/2 surviving rows.
/** * Insert a mix of puts and deletes * @throws Exception */ @Test public void testMixedPutDelete() throws Exception { List entries=new ArrayList(BATCH_SIZE / 2); List cells=new ArrayList(); for (int i=0; i < BATCH_SIZE / 2; i++) { entries.add(createEntry(TABLE_NAME1,i,KeyValue.Type.Put,cells)); } SINK.replicateEntries(entries,CellUtil.createCellScanner(cells),replicationClusterId,baseNamespaceDir,hfileArchiveDir); entries=new ArrayList(BATCH_SIZE); cells=new ArrayList(); for (int i=0; i < BATCH_SIZE; i++) { entries.add(createEntry(TABLE_NAME1,i,i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn,cells)); } SINK.replicateEntries(entries,CellUtil.createCellScanner(cells.iterator()),replicationClusterId,baseNamespaceDir,hfileArchiveDir); Scan scan=new Scan(); ResultScanner scanRes=table1.getScanner(scan); assertEquals(BATCH_SIZE / 2,scanRes.next(BATCH_SIZE).length); }

InternalCallVerifier EqualityVerifier 
// Replicates a full batch of BATCH_SIZE put entries through the sink and verifies a scan
// returns exactly BATCH_SIZE rows.
/** * Insert a whole batch of entries * @throws Exception */ @Test public void testBatchSink() throws Exception { List entries=new ArrayList(BATCH_SIZE); List cells=new ArrayList(); for (int i=0; i < BATCH_SIZE; i++) { entries.add(createEntry(TABLE_NAME1,i,KeyValue.Type.Put,cells)); } SINK.replicateEntries(entries,CellUtil.createCellScanner(cells.iterator()),replicationClusterId,baseNamespaceDir,hfileArchiveDir); Scan scan=new Scan(); ResultScanner scanRes=table1.getScanner(scan); assertEquals(BATCH_SIZE,scanRes.next(BATCH_SIZE).length); }

InternalCallVerifier EqualityVerifier 
// Sends puts for rows 0-1, then a DeleteFamily for row 1 in the same batch, then puts for
// rows 3-4; because the delete is applied unbuffered, row 1 must end up empty (size 0).
/** * Puts are buffered, but this tests when a delete (not-buffered) is applied * before the actual Put that creates it. * @throws Exception */ @Test public void testApplyDeleteBeforePut() throws Exception { List entries=new ArrayList(5); List cells=new ArrayList(); for (int i=0; i < 2; i++) { entries.add(createEntry(TABLE_NAME1,i,KeyValue.Type.Put,cells)); } entries.add(createEntry(TABLE_NAME1,1,KeyValue.Type.DeleteFamily,cells)); for (int i=3; i < 5; i++) { entries.add(createEntry(TABLE_NAME1,i,KeyValue.Type.Put,cells)); } SINK.replicateEntries(entries,CellUtil.createCellScanner(cells.iterator()),replicationClusterId,baseNamespaceDir,hfileArchiveDir); Get get=new Get(Bytes.toBytes(1)); Result res=table1.get(get); assertEquals(0,res.size()); }

InternalCallVerifier EqualityVerifier 
// Creates 25 HFiles with sorted random key ranges under a family dir, copies them to the
// namespace path the sink expects, builds a WAL bulk-load descriptor/entry for them, and
// verifies the table goes from 0 rows to numRows after SINK.replicateEntries runs.
// NOTE(review): the copy loop always copies p.get(0) to each destination — presumably
// intentional since only row counts are checked, but worth confirming.
/** * Test replicateEntries with a bulk load entry for 25 HFiles */ @Test public void testReplicateEntriesForHFiles() throws Exception { Path dir=TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries"); Path familyDir=new Path(dir,Bytes.toString(FAM_NAME1)); int numRows=10; List p=new ArrayList<>(1); Random rng=new SecureRandom(); Set numbers=new HashSet<>(); while (numbers.size() < 50) { numbers.add(rng.nextInt(1000)); } List numberList=new ArrayList<>(numbers); Collections.sort(numberList); Configuration conf=TEST_UTIL.getConfiguration(); FileSystem fs=dir.getFileSystem(conf); Iterator numbersItr=numberList.iterator(); for (int i=0; i < 25; i++) { Path hfilePath=new Path(familyDir,"hfile_" + i); HFileTestUtil.createHFile(conf,fs,hfilePath,FAM_NAME1,FAM_NAME1,Bytes.toBytes(numbersItr.next()),Bytes.toBytes(numbersItr.next()),numRows); p.add(hfilePath); } Map> storeFiles=new HashMap<>(1); storeFiles.put(FAM_NAME1,p); WALEdit edit=null; WALProtos.BulkLoadDescriptor loadDescriptor=null; try (Connection c=ConnectionFactory.createConnection(conf);RegionLocator l=c.getRegionLocator(TABLE_NAME1)){ HRegionInfo regionInfo=l.getAllRegionLocations().get(0).getRegionInfo(); loadDescriptor=ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1,ByteStringer.wrap(regionInfo.getEncodedNameAsBytes()),storeFiles,1); edit=WALEdit.createBulkLoadEvent(regionInfo,loadDescriptor); } List entries=new ArrayList(1); WALEntry.Builder builder=createWALEntryBuilder(TABLE_NAME1); for (int i=0; i < 25; i++) { String pathToHfileFromNS=new StringBuilder(100).append(TABLE_NAME1.getNamespaceAsString()).append(Path.SEPARATOR).append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR).append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray())).append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1)).append(Path.SEPARATOR).append("hfile_" + i).toString(); String dst=baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS; FileUtil.copy(fs,p.get(0),fs,new Path(dst),false,conf); } 
entries.add(builder.build()); ResultScanner scanRes=null; try { Scan scan=new Scan(); scanRes=table1.getScanner(scan); assertEquals(0,scanRes.next(numRows).length); SINK.replicateEntries(entries,CellUtil.createCellScanner(edit.getCells().iterator()),replicationClusterId,baseNamespaceDir,hfileArchiveDir); scanRes=table1.getScanner(scan); assertEquals(numRows,scanRes.next(numRows).length); } finally { if (scanRes != null) { scanRes.close(); } } }

InternalCallVerifier EqualityVerifier 
// Replicates 3 puts, then deletes each row (DeleteColumn for rows 0 and 2, DeleteFamily
// for row 1); a final scan must return no rows.
/** * Insert then do different types of deletes * @throws Exception */ @Test public void testMixedDeletes() throws Exception { List entries=new ArrayList(3); List cells=new ArrayList(); for (int i=0; i < 3; i++) { entries.add(createEntry(TABLE_NAME1,i,KeyValue.Type.Put,cells)); } SINK.replicateEntries(entries,CellUtil.createCellScanner(cells.iterator()),replicationClusterId,baseNamespaceDir,hfileArchiveDir); entries=new ArrayList(3); cells=new ArrayList(); entries.add(createEntry(TABLE_NAME1,0,KeyValue.Type.DeleteColumn,cells)); entries.add(createEntry(TABLE_NAME1,1,KeyValue.Type.DeleteFamily,cells)); entries.add(createEntry(TABLE_NAME1,2,KeyValue.Type.DeleteColumn,cells)); SINK.replicateEntries(entries,CellUtil.createCellScanner(cells.iterator()),replicationClusterId,baseNamespaceDir,hfileArchiveDir); Scan scan=new Scan(); ResultScanner scanRes=table1.getScanner(scan); assertEquals(0,scanRes.next(3).length); }

Class: org.apache.hadoop.hbase.replication.regionserver.TestReplicationSinkManager

InternalCallVerifier EqualityVerifier 
// With 20 region servers reported by the endpoint, chooseSinks must select 2 sinks
// (the configured ratio of available servers).
@Test public void testChooseSinks(){ List serverNames=Lists.newArrayList(); for (int i=0; i < 20; i++) { serverNames.add(mock(ServerName.class)); } when(replicationEndpoint.getRegionServers()).thenReturn(serverNames); sinkManager.chooseSinks(); assertEquals(2,sinkManager.getNumSinks()); }

InternalCallVerifier EqualityVerifier 
// Reports both chosen sinks bad past DEFAULT_BAD_SINK_THRESHOLD; once every sink would be
// removed, the manager re-chooses, so the sink count must return to 2 rather than drop to 0.
@Test public void testReportBadSink_DownToZeroSinks(){ List serverNames=Lists.newArrayList(); for (int i=0; i < 20; i++) { serverNames.add(mock(ServerName.class)); } when(replicationEndpoint.getRegionServers()).thenReturn(serverNames); sinkManager.chooseSinks(); List sinkList=sinkManager.getSinksForTesting(); assertEquals(2,sinkList.size()); ServerName serverNameA=sinkList.get(0); ServerName serverNameB=sinkList.get(1); SinkPeer sinkPeerA=new SinkPeer(serverNameA,mock(AdminService.BlockingInterface.class)); SinkPeer sinkPeerB=new SinkPeer(serverNameB,mock(AdminService.BlockingInterface.class)); for (int i=0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD; i++) { sinkManager.reportBadSink(sinkPeerA); sinkManager.reportBadSink(sinkPeerB); } assertEquals(2,sinkManager.getNumSinks()); }

InternalCallVerifier EqualityVerifier 
// With only 2 servers available, chooseSinks still selects at least 1 sink even though
// the ratio would round down.
@Test public void testChooseSinks_LessThanRatioAvailable(){ List serverNames=Lists.newArrayList(mock(ServerName.class),mock(ServerName.class)); when(replicationEndpoint.getRegionServers()).thenReturn(serverNames); sinkManager.chooseSinks(); assertEquals(1,sinkManager.getNumSinks()); }

InternalCallVerifier EqualityVerifier 
// Verifies threshold bookkeeping: a sink reported bad more than DEFAULT_BAD_SINK_THRESHOLD
// times is dropped (3 -> 2 sinks), a reportSinkSuccess resets the bad count so the sink
// survives further reports, and accumulating past the threshold again drops it (2 -> 1).
/** * Once a SinkPeer has been reported as bad more than BAD_SINK_THRESHOLD times, it should not * be replicated to anymore. */ @Test public void testReportBadSink_PastThreshold(){ List serverNames=Lists.newArrayList(); for (int i=0; i < 30; i++) { serverNames.add(mock(ServerName.class)); } when(replicationEndpoint.getRegionServers()).thenReturn(serverNames); sinkManager.chooseSinks(); assertEquals(3,sinkManager.getNumSinks()); ServerName serverName=sinkManager.getSinksForTesting().get(0); SinkPeer sinkPeer=new SinkPeer(serverName,mock(AdminService.BlockingInterface.class)); sinkManager.reportSinkSuccess(sinkPeer); for (int i=0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD; i++) { sinkManager.reportBadSink(sinkPeer); } assertEquals(2,sinkManager.getNumSinks()); serverName=sinkManager.getSinksForTesting().get(0); sinkPeer=new SinkPeer(serverName,mock(AdminService.BlockingInterface.class)); for (int i=0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD - 1; i++) { sinkManager.reportBadSink(sinkPeer); } sinkManager.reportSinkSuccess(sinkPeer); sinkManager.reportBadSink(sinkPeer); assertEquals(2,sinkManager.getNumSinks()); for (int i=0; i <= ReplicationSinkManager.DEFAULT_BAD_SINK_THRESHOLD - 2; i++) { sinkManager.reportBadSink(sinkPeer); } assertEquals(2,sinkManager.getNumSinks()); sinkManager.reportBadSink(sinkPeer); assertEquals(1,sinkManager.getNumSinks()); }

InternalCallVerifier EqualityVerifier 
// A single bad-sink report (below the threshold) must not change the number of sinks.
@Test public void testReportBadSink(){ ServerName serverNameA=mock(ServerName.class); ServerName serverNameB=mock(ServerName.class); when(replicationEndpoint.getRegionServers()).thenReturn(Lists.newArrayList(serverNameA,serverNameB)); sinkManager.chooseSinks(); assertEquals(1,sinkManager.getNumSinks()); SinkPeer sinkPeer=new SinkPeer(serverNameA,mock(AdminService.BlockingInterface.class)); sinkManager.reportBadSink(sinkPeer); assertEquals(1,sinkManager.getNumSinks()); }

Class: org.apache.hadoop.hbase.replication.regionserver.TestReplicationSourceManager

InternalCallVerifier BooleanVerifier 
// With REPLICATION_BULKLOAD_ENABLE_KEY on, scopeWALEdits must include family f1 (global
// scope) in the WAL key scopes and exclude f2 (local scope).
@Test public void testBulkLoadWALEdits() throws Exception { WALKey logKey=new WALKey(); WALEdit logEdit=getBulkLoadWALEdit(); Configuration bulkLoadConf=HBaseConfiguration.create(conf); bulkLoadConf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,true); Replication.scopeWALEdits(htd,logKey,logEdit,bulkLoadConf,manager); NavigableMap scopes=logKey.getScopes(); assertTrue("This family scope is set to global, should be part of replication key scopes.",scopes.containsKey(f1)); assertFalse("This family scope is set to local, should not be part of replication key scopes",scopes.containsKey(f2)); }

InternalCallVerifier EqualityVerifier 
// Enqueues two WALs under one group for a "failed" server, has a NodeFailoverWorker claim
// them into recovered queues, then verifies cleanOldLogs on the newer file removes the
// older one from the recovered queue for that group.
@Test public void testCleanupFailoverQueues() throws Exception { final Server server=new DummyServer("hostname1.example.org"); ReplicationQueues rq=ReplicationFactory.getReplicationQueues(server.getZooKeeper(),server.getConfiguration(),server); rq.init(server.getServerName().toString()); SortedSet files=new TreeSet(); String group="testgroup"; String file1=group + ".log1"; String file2=group + ".log2"; files.add(file1); files.add(file2); for ( String file : files) { rq.addLog("1",file); } Server s1=new DummyServer("dummyserver1.example.org"); ReplicationQueues rq1=ReplicationFactory.getReplicationQueues(s1.getZooKeeper(),s1.getConfiguration(),s1); rq1.init(s1.getServerName().toString()); ReplicationPeers rp1=ReplicationFactory.getReplicationPeers(s1.getZooKeeper(),s1.getConfiguration(),s1); rp1.init(); NodeFailoverWorker w1=manager.new NodeFailoverWorker(server.getServerName().getServerName(),rq1,rp1,new UUID(new Long(1),new Long(2))); w1.start(); w1.join(5000); assertEquals(1,manager.getWalsByIdRecoveredQueues().size()); String id="1-" + server.getServerName().getServerName(); assertEquals(files,manager.getWalsByIdRecoveredQueues().get(id).get(group)); manager.cleanOldLogs(file2,id,true); assertEquals(Sets.newHashSet(file2),manager.getWalsByIdRecoveredQueues().get(id).get(group)); }

InternalCallVerifier EqualityVerifier 
// Three concurrent failover workers race (via ZK multi) to claim the dead server's queue;
// exactly one of them must end up with a populated log-znodes map.
@Test public void testClaimQueues() throws Exception { LOG.debug("testNodeFailoverWorkerCopyQueuesFromRSUsingMulti"); conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI,true); final Server server=new DummyServer("hostname0.example.org"); ReplicationQueues rq=ReplicationFactory.getReplicationQueues(server.getZooKeeper(),server.getConfiguration(),server); rq.init(server.getServerName().toString()); files.add("log1"); files.add("log2"); for ( String file : files) { rq.addLog("1",file); } Server s1=new DummyServer("dummyserver1.example.org"); Server s2=new DummyServer("dummyserver2.example.org"); Server s3=new DummyServer("dummyserver3.example.org"); DummyNodeFailoverWorker w1=new DummyNodeFailoverWorker(server.getServerName().getServerName(),s1); DummyNodeFailoverWorker w2=new DummyNodeFailoverWorker(server.getServerName().getServerName(),s2); DummyNodeFailoverWorker w3=new DummyNodeFailoverWorker(server.getServerName().getServerName(),s3); latch=new CountDownLatch(3); w1.start(); w2.start(); w3.start(); int populatedMap=0; latch.await(); populatedMap+=w1.isLogZnodesMapPopulated() + w2.isLogZnodesMapPopulated() + w3.isLogZnodesMapPopulated(); assertEquals(1,populatedMap); server.abort("",null); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Chains three successive queue claims (server -> s1 -> s2 -> s3) using hostnames that
// contain dashes/dots, then verifies ReplicationQueueInfo parses all three dead server
// names out of the resulting queue id.
@Test public void testNodeFailoverDeadServerParsing() throws Exception { LOG.debug("testNodeFailoverDeadServerParsing"); conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI,true); final Server server=new DummyServer("ec2-54-234-230-108.compute-1.amazonaws.com"); ReplicationQueues repQueues=ReplicationFactory.getReplicationQueues(server.getZooKeeper(),conf,server); repQueues.init(server.getServerName().toString()); files.add("log1"); files.add("log2"); for ( String file : files) { repQueues.addLog("1",file); } Server s1=new DummyServer("ip-10-8-101-114.ec2.internal"); Server s2=new DummyServer("ec2-107-20-52-47.compute-1.amazonaws.com"); Server s3=new DummyServer("ec2-23-20-187-167.compute-1.amazonaws.com"); ReplicationQueues rq1=ReplicationFactory.getReplicationQueues(s1.getZooKeeper(),s1.getConfiguration(),s1); rq1.init(s1.getServerName().toString()); SortedMap> testMap=rq1.claimQueues(server.getServerName().getServerName()); ReplicationQueues rq2=ReplicationFactory.getReplicationQueues(s2.getZooKeeper(),s2.getConfiguration(),s2); rq2.init(s2.getServerName().toString()); testMap=rq2.claimQueues(s1.getServerName().getServerName()); ReplicationQueues rq3=ReplicationFactory.getReplicationQueues(s3.getZooKeeper(),s3.getConfiguration(),s3); rq3.init(s3.getServerName().toString()); testMap=rq3.claimQueues(s2.getServerName().getServerName()); ReplicationQueueInfo replicationQueueInfo=new ReplicationQueueInfo(testMap.firstKey()); List result=replicationQueueInfo.getDeadRegionServers(); assertTrue(result.contains(server.getServerName().getServerName())); assertTrue(result.contains(s1.getServerName().getServerName())); assertTrue(result.contains(s2.getServerName().getServerName())); server.abort("",null); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Appends 100 edits rolling the WAL every 20, then 3 more, and expects 6 WALs tracked for
// the slave queue; after logPositionAndCleanOldLogs plus one more append/sync only 1 WAL
// group remains.  NOTE(review): "tableame" looks like a typo for "tablename" but is only a
// test table name, so behavior is unaffected.
@Test public void testLogRoll() throws Exception { long baseline=1000; long time=baseline; MultiVersionConcurrencyControl mvcc=new MultiVersionConcurrencyControl(); KeyValue kv=new KeyValue(r1,f1,r1); WALEdit edit=new WALEdit(); edit.add(kv); List listeners=new ArrayList(); listeners.add(replication); final WALFactory wals=new WALFactory(utility.getConfiguration(),listeners,URLEncoder.encode("regionserver:60020","UTF8")); final WAL wal=wals.getWAL(hri.getEncodedNameAsBytes(),hri.getTable().getNamespace()); manager.init(); HTableDescriptor htd=new HTableDescriptor(TableName.valueOf("tableame")); htd.addFamily(new HColumnDescriptor(f1)); for (long i=1; i < 101; i++) { if (i > 1 && i % 20 == 0) { wal.rollWriter(); } LOG.info(i); final long txid=wal.append(htd,hri,new WALKey(hri.getEncodedNameAsBytes(),test,System.currentTimeMillis(),mvcc),edit,true); wal.sync(txid); } LOG.info(baseline + " and " + time); baseline+=101; time=baseline; LOG.info(baseline + " and " + time); for (int i=0; i < 3; i++) { wal.append(htd,hri,new WALKey(hri.getEncodedNameAsBytes(),test,System.currentTimeMillis(),mvcc),edit,true); } wal.sync(); int logNumber=0; for ( Map.Entry> entry : manager.getWALs().get(slaveId).entrySet()) { logNumber+=entry.getValue().size(); } assertEquals(6,logNumber); wal.rollWriter(); manager.logPositionAndCleanOldLogs(manager.getSources().get(0).getCurrentPath(),"1",0,false,false); wal.append(htd,hri,new WALKey(hri.getEncodedNameAsBytes(),test,System.currentTimeMillis(),mvcc),edit,true); wal.sync(); assertEquals(1,manager.getWALs().size()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Claiming a dead server's queue must bump the queues znode cversion by exactly 1
// (checked via ReplicationQueuesClient before and after claimQueues).
@Test public void testFailoverDeadServerCversionChange() throws Exception { LOG.debug("testFailoverDeadServerCversionChange"); conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI,true); final Server s0=new DummyServer("cversion-change0.example.org"); ReplicationQueues repQueues=ReplicationFactory.getReplicationQueues(s0.getZooKeeper(),conf,s0); repQueues.init(s0.getServerName().toString()); files.add("log1"); files.add("log2"); for ( String file : files) { repQueues.addLog("1",file); } Server s1=new DummyServer("cversion-change1.example.org"); ReplicationQueues rq1=ReplicationFactory.getReplicationQueues(s1.getZooKeeper(),s1.getConfiguration(),s1); rq1.init(s1.getServerName().toString()); ReplicationQueuesClient client=ReplicationFactory.getReplicationQueuesClient(s1.getZooKeeper(),s1.getConfiguration(),s1); int v0=client.getQueuesZNodeCversion(); rq1.claimQueues(s0.getServerName().getServerName()); int v1=client.getQueuesZNodeCversion(); assertEquals(v0 + 1,v1); s0.abort("",null); }

Class: org.apache.hadoop.hbase.replication.regionserver.TestReplicationThrottler

BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Unit test for throttling.  throttler1 is configured for 100 bytes/cycle and throttler2
 * for 10 bytes/cycle; after pushing a given number of bytes the next sleep interval must
 * scale with the configured bandwidth.  Range assertions allow slack for elapsed wall time
 * between addPushSize and getNextSleepInterval.
 */
@Test(timeout=10000) public void testThrottling(){
  LOG.info("testThrottling");
  ReplicationThrottler throttler1=new ReplicationThrottler(100);
  ReplicationThrottler throttler2=new ReplicationThrottler(10);
  // Nothing pushed yet: no throttling needed.
  long ticks1=throttler1.getNextSleepInterval(1000);
  long ticks2=throttler2.getNextSleepInterval(1000);
  assertEquals(0,ticks1);
  assertEquals(0,ticks2);
  throttler1.addPushSize(1000);
  throttler2.addPushSize(1000);
  ticks1=throttler1.getNextSleepInterval(5);
  ticks2=throttler2.getNextSleepInterval(5);
  if (ticks1 != 1000 && ticks1 != 999) {
    assertTrue(ticks1 >= 750 && ticks1 <= 1000);
  }
  if (ticks2 != 10000 && ticks2 != 9999) {
    // BUGFIX: the original asserted on ticks1 here (copy-paste error); this branch
    // guards on ticks2 and must check ticks2's range.
    assertTrue(ticks2 >= 7500 && ticks2 <= 10000);
  }
  throttler1.resetStartTick();
  throttler2.resetStartTick();
  throttler1.addPushSize(5);
  throttler2.addPushSize(5);
  ticks1=throttler1.getNextSleepInterval(45);
  ticks2=throttler2.getNextSleepInterval(45);
  // 5 + 45 bytes fits within throttler1's 100-byte budget, so no sleep.
  assertTrue(ticks1 == 0);
  if (ticks2 != 100 && ticks2 != 99) {
    // BUGFIX: was asserting on ticks1 (copy-paste error); check ticks2's range.
    assertTrue(ticks2 >= 75 && ticks2 <= 100);
  }
  throttler2.resetStartTick();
  throttler1.addPushSize(45);
  throttler2.addPushSize(45);
  ticks1=throttler1.getNextSleepInterval(60);
  ticks2=throttler2.getNextSleepInterval(60);
  if (ticks1 != 100 && ticks1 != 99) {
    assertTrue(ticks1 >= 75 && ticks1 <= 100);
  }
  if (ticks2 != 500 && ticks2 != 499) {
    // BUGFIX: was asserting on ticks1 (copy-paste error); check ticks2's range.
    assertTrue(ticks2 >= 375 && ticks2 <= 500);
  }
}

Class: org.apache.hadoop.hbase.replication.regionserver.TestReplicationWALReaderManager

IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Drives the WAL reader manager through its lifecycle: read one appended entry then null at
// EOF, reopen after another append (position advances), read null after a roll (position
// unchanged), then finishCurrentFile and read nbRows entries from the new file.
@Test public void test() throws Exception { Path path=pathWatcher.currentPath; assertEquals(0,logManager.getPosition()); appendToLog(); assertNotNull(logManager.openReader(path)); logManager.seek(); WAL.Entry entry=logManager.readNextAndSetPosition(); assertNotNull(entry); entry=logManager.readNextAndSetPosition(); assertNull(entry); logManager.closeReader(); long oldPos=logManager.getPosition(); appendToLog(); assertNotNull(logManager.openReader(path)); logManager.seek(); entry=logManager.readNextAndSetPosition(); assertNotEquals(oldPos,logManager.getPosition()); assertNotNull(entry); logManager.closeReader(); oldPos=logManager.getPosition(); log.rollWriter(); assertNotNull(logManager.openReader(path)); logManager.seek(); entry=logManager.readNextAndSetPosition(); assertEquals(oldPos,logManager.getPosition()); assertNull(entry); logManager.finishCurrentFile(); path=pathWatcher.currentPath; for (int i=0; i < nbRows; i++) { appendToLogPlus(walEditKVs); } log.rollWriter(); logManager.openReader(path); logManager.seek(); for (int i=0; i < nbRows; i++) { WAL.Entry e=logManager.readNextAndSetPosition(); if (e == null) { fail("Should have enough entries"); } } }

Class: org.apache.hadoop.hbase.rest.TestGZIPResponseWrapper

InternalCallVerifier EqualityVerifier 
// Before commit, the wrapper returns a GZIP stream and sets Content-Encoding; reset() must
// clear the header, and once the response is committed the raw output stream is returned.
@Test public void testReset() throws IOException { when(response.isCommitted()).thenReturn(false); ServletOutputStream out=mock(ServletOutputStream.class); when(response.getOutputStream()).thenReturn(out); ServletOutputStream servletOutput=wrapper.getOutputStream(); verify(response).addHeader("Content-Encoding","gzip"); assertEquals(GZIPResponseStream.class,servletOutput.getClass()); wrapper.reset(); verify(response).setHeader("Content-Encoding",null); when(response.isCommitted()).thenReturn(true); servletOutput=wrapper.getOutputStream(); assertEquals(out.getClass(),servletOutput.getClass()); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Like testReset but via resetBuffer(): Content-Encoding is cleared, a committed response
// yields the raw stream, and getWriter() still returns a non-null writer afterwards.
@Test public void testResetBuffer() throws IOException { when(response.isCommitted()).thenReturn(false); ServletOutputStream out=mock(ServletOutputStream.class); when(response.getOutputStream()).thenReturn(out); ServletOutputStream servletOutput=wrapper.getOutputStream(); assertEquals(GZIPResponseStream.class,servletOutput.getClass()); wrapper.resetBuffer(); verify(response).setHeader("Content-Encoding",null); when(response.isCommitted()).thenReturn(true); servletOutput=wrapper.getOutputStream(); assertEquals(out.getClass(),servletOutput.getClass()); assertNotNull(wrapper.getWriter()); }

Class: org.apache.hadoop.hbase.rest.TestGetAndPutResource

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Issues a PUT, GET and DELETE against one cell, then checks the REST metrics source
// counted >2 requests and at least one successful get/put/delete.
@Test public void testMetrics() throws IOException, JAXBException { final String path="/" + TABLE + "/"+ ROW_4+ "/"+ COLUMN_1; Response response=client.put(path,Constants.MIMETYPE_BINARY,Bytes.toBytes(VALUE_4)); assertEquals(response.getCode(),200); Thread.yield(); response=client.get(path,Constants.MIMETYPE_JSON); assertEquals(response.getCode(),200); assertEquals(Constants.MIMETYPE_JSON,response.getHeader("content-type")); response=deleteRow(TABLE,ROW_4); assertEquals(response.getCode(),200); UserProvider userProvider=UserProvider.instantiate(conf); METRICS_ASSERT.assertCounterGt("requests",2l,RESTServlet.getInstance(conf,userProvider).getMetrics().getSource()); METRICS_ASSERT.assertCounterGt("successfulGet",0l,RESTServlet.getInstance(conf,userProvider).getMetrics().getSource()); METRICS_ASSERT.assertCounterGt("successfulPut",0l,RESTServlet.getInstance(conf,userProvider).getMetrics().getSource()); METRICS_ASSERT.assertCounterGt("successfulDelete",0l,RESTServlet.getInstance(conf,userProvider).getMetrics().getSource()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// PUTs a protobuf cell set of two rows via a fake-row path (the row in the URL is ignored
// for multi-row puts, so a GET of "fakerow" returns 404), then verifies each real cell
// value and deletes both rows.
@Test public void testMultiCellGetPutPB() throws IOException { String path="/" + TABLE + "/fakerow"; CellSetModel cellSetModel=new CellSetModel(); RowModel rowModel=new RowModel(ROW_1); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),Bytes.toBytes(VALUE_1))); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel=new RowModel(ROW_2); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),Bytes.toBytes(VALUE_3))); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); Response response=client.put(path,Constants.MIMETYPE_PROTOBUF,cellSetModel.createProtobufOutput()); Thread.yield(); response=client.get(path,Constants.MIMETYPE_PROTOBUF); assertEquals(response.getCode(),404); checkValuePB(TABLE,ROW_1,COLUMN_1,VALUE_1); checkValuePB(TABLE,ROW_1,COLUMN_2,VALUE_2); checkValuePB(TABLE,ROW_2,COLUMN_1,VALUE_3); checkValuePB(TABLE,ROW_2,COLUMN_2,VALUE_4); response=deleteRow(TABLE,ROW_1); assertEquals(response.getCode(),200); response=deleteRow(TABLE,ROW_2); assertEquals(response.getCode(),200); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// JSON variant of the multi-cell put/get round trip: PUT two rows via the fake-row path,
// expect 404 on GET of the fake row, verify each cell via checkValueJSON, then clean up.
@Test public void testMultiCellGetJson() throws IOException, JAXBException { String path="/" + TABLE + "/fakerow"; CellSetModel cellSetModel=new CellSetModel(); RowModel rowModel=new RowModel(ROW_1); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),Bytes.toBytes(VALUE_1))); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel=new RowModel(ROW_2); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),Bytes.toBytes(VALUE_3))); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); String jsonString=jsonMapper.writeValueAsString(cellSetModel); Response response=client.put(path,Constants.MIMETYPE_JSON,Bytes.toBytes(jsonString)); Thread.yield(); response=client.get(path,Constants.MIMETYPE_JSON); assertEquals(response.getCode(),404); checkValueJSON(TABLE,ROW_1,COLUMN_1,VALUE_1); checkValueJSON(TABLE,ROW_1,COLUMN_2,VALUE_2); checkValueJSON(TABLE,ROW_2,COLUMN_1,VALUE_3); checkValueJSON(TABLE,ROW_2,COLUMN_2,VALUE_4); response=deleteRow(TABLE,ROW_1); assertEquals(response.getCode(),200); response=deleteRow(TABLE,ROW_2); assertEquals(response.getCode(),200); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// PUTs two rows via XML, then GETs with a suffix-glob row key ("testrow*") restricted to
// COLUMN_1; expects both rows back, each with exactly one cell in that column.
@Test public void testSuffixGlobbingXML() throws IOException, JAXBException { String path="/" + TABLE + "/fakerow"; CellSetModel cellSetModel=new CellSetModel(); RowModel rowModel=new RowModel(ROW_1); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),Bytes.toBytes(VALUE_1))); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel=new RowModel(ROW_2); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),Bytes.toBytes(VALUE_3))); rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); StringWriter writer=new StringWriter(); xmlMarshaller.marshal(cellSetModel,writer); Response response=client.put(path,Constants.MIMETYPE_XML,Bytes.toBytes(writer.toString())); Thread.yield(); response=client.get(path,Constants.MIMETYPE_XML); assertEquals(response.getCode(),404); StringBuilder query=new StringBuilder(); query.append('/'); query.append(TABLE); query.append('/'); query.append("testrow*"); query.append('/'); query.append(COLUMN_1); response=client.get(query.toString(),Constants.MIMETYPE_XML); assertEquals(response.getCode(),200); assertEquals(Constants.MIMETYPE_XML,response.getHeader("content-type")); CellSetModel cellSet=(CellSetModel)xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); List rows=cellSet.getRows(); assertTrue(rows.size() == 2); for ( RowModel row : rows) { assertTrue(row.getCells().size() == 1); assertEquals(COLUMN_1,Bytes.toString(row.getCells().get(0).getColumn())); } response=deleteRow(TABLE,ROW_1); assertEquals(response.getCode(),200); response=deleteRow(TABLE,ROW_2); assertEquals(response.getCode(),200); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
// Writes three rows, then fetches the half-open range [rows[0], rows[2]) for COLUMN_1 and
// verifies exactly the first two rows come back with their expected values before cleanup.
@Test public void testStartEndRowGetPutXML() throws IOException, JAXBException { String[] rows={ROW_1,ROW_2,ROW_3}; String[] values={VALUE_1,VALUE_2,VALUE_3}; Response response=null; for (int i=0; i < rows.length; i++) { response=putValueXML(TABLE,rows[i],COLUMN_1,values[i]); assertEquals(200,response.getCode()); checkValueXML(TABLE,rows[i],COLUMN_1,values[i]); } response=getValueXML(TABLE,rows[0],rows[2],COLUMN_1); assertEquals(200,response.getCode()); CellSetModel cellSet=(CellSetModel)xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertEquals(2,cellSet.getRows().size()); for (int i=0; i < cellSet.getRows().size() - 1; i++) { RowModel rowModel=cellSet.getRows().get(i); for ( CellModel cell : rowModel.getCells()) { assertEquals(COLUMN_1,Bytes.toString(cell.getColumn())); assertEquals(values[i],Bytes.toString(cell.getValue())); } } for ( String row : rows) { response=deleteRow(TABLE,row); assertEquals(200,response.getCode()); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// PUTs two versions of the same cell (timestamps 1 and 2) in one JSON cell set; a plain
// GET must return only the latest version (VALUE_2 at timestamp 2).
@Test public void testLatestCellGetJSON() throws IOException, JAXBException { final String path="/" + TABLE + "/"+ ROW_4+ "/"+ COLUMN_1; CellSetModel cellSetModel=new CellSetModel(); RowModel rowModel=new RowModel(ROW_4); CellModel cellOne=new CellModel(Bytes.toBytes(COLUMN_1),1L,Bytes.toBytes(VALUE_1)); CellModel cellTwo=new CellModel(Bytes.toBytes(COLUMN_1),2L,Bytes.toBytes(VALUE_2)); rowModel.addCell(cellOne); rowModel.addCell(cellTwo); cellSetModel.addRow(rowModel); String jsonString=jsonMapper.writeValueAsString(cellSetModel); Response response=client.put(path,Constants.MIMETYPE_JSON,Bytes.toBytes(jsonString)); assertEquals(response.getCode(),200); Thread.yield(); response=client.get(path,Constants.MIMETYPE_JSON); assertEquals(response.getCode(),200); assertEquals(Constants.MIMETYPE_JSON,response.getHeader("content-type")); CellSetModel cellSet=jsonMapper.readValue(response.getBody(),CellSetModel.class); assertTrue(cellSet.getRows().size() == 1); assertTrue(cellSet.getRows().get(0).getCells().size() == 1); CellModel cell=cellSet.getRows().get(0).getCells().get(0); assertEquals(VALUE_2,Bytes.toString(cell.getValue())); assertEquals(2L,cell.getTimestamp()); response=deleteRow(TABLE,ROW_4); assertEquals(response.getCode(),200); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Simple round trip: binary PUT of one cell, JSON GET returning 200 with a JSON
// content-type, then row delete.
@Test public void testSingleCellGetJSON() throws IOException, JAXBException { final String path="/" + TABLE + "/"+ ROW_4+ "/"+ COLUMN_1; Response response=client.put(path,Constants.MIMETYPE_BINARY,Bytes.toBytes(VALUE_4)); assertEquals(response.getCode(),200); Thread.yield(); response=client.get(path,Constants.MIMETYPE_JSON); assertEquals(response.getCode(),200); assertEquals(Constants.MIMETYPE_JSON,response.getHeader("content-type")); response=deleteRow(TABLE,ROW_4); assertEquals(response.getCode(),200); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Stores a cell via a raw binary PUT, reads it back as binary, and checks both
 * the body bytes and the presence of the X-Timestamp response header.
 */
@Test
public void testSingleCellGetPutBinary() throws IOException {
  final String path = "/" + TABLE + "/" + ROW_3 + "/" + COLUMN_1;
  final byte[] body = Bytes.toBytes(VALUE_3);
  Response response = client.put(path, Constants.MIMETYPE_BINARY, body);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(200, response.getCode());
  Thread.yield();
  response = client.get(path, Constants.MIMETYPE_BINARY);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type"));
  assertTrue(Bytes.equals(response.getBody(), body));
  // The binary representation reports the cell timestamp in a header.
  boolean foundTimestampHeader = false;
  for (Header header : response.getHeaders()) {
    if (header.getName().equals("X-Timestamp")) {
      foundTimestampHeader = true;
      break;
    }
  }
  assertTrue(foundTimestampHeader);
  response = deleteRow(TABLE, ROW_3);
  assertEquals(200, response.getCode());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes one row with three columns (row key taken from the cell data, not the
 * URL) and fetches all three in a single XML GET using the comma-separated
 * column-list syntax.
 */
@Test
public void testMultiColumnGetXML() throws Exception {
  String path = "/" + TABLE + "/fakerow";
  CellSetModel cellSetModel = new CellSetModel();
  RowModel rowModel = new RowModel(ROW_1);
  rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1)));
  rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2)));
  rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_3), Bytes.toBytes(VALUE_2)));
  cellSetModel.addRow(rowModel);
  StringWriter writer = new StringWriter();
  xmlMarshaller.marshal(cellSetModel, writer);
  // NOTE(review): the PUT response code is intentionally unchecked here;
  // the following 404 GET confirms "fakerow" itself was not created.
  Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString()));
  Thread.yield();
  response = client.get(path, Constants.MIMETYPE_XML);
  assertEquals(404, response.getCode());
  path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1 + "," + COLUMN_2 + "," + COLUMN_3;
  response = client.get(path, Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  CellSetModel cellSet =
    (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
  assertTrue(cellSet.getRows().size() == 1);
  assertTrue(cellSet.getRows().get(0).getCells().size() == 3);
  List cells = cellSet.getRows().get(0).getCells();
  assertTrue(containsCellModel(cells, COLUMN_1, VALUE_1));
  assertTrue(containsCellModel(cells, COLUMN_2, VALUE_2));
  assertTrue(containsCellModel(cells, COLUMN_3, VALUE_2));
  response = deleteRow(TABLE, ROW_1);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(200, response.getCode());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes two rows and verifies the suffix-globbing row spec ("testrow*")
 * returns both of them as XML.
 */
@Test
public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBException {
  String path = "/" + TABLE + "/fakerow";
  CellSetModel cellSetModel = new CellSetModel();
  RowModel rowModel = new RowModel(ROW_1);
  rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1)));
  rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2)));
  cellSetModel.addRow(rowModel);
  rowModel = new RowModel(ROW_2);
  rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3)));
  rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4)));
  cellSetModel.addRow(rowModel);
  StringWriter writer = new StringWriter();
  xmlMarshaller.marshal(cellSetModel, writer);
  Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString()));
  Thread.yield();
  // "fakerow" itself must not exist; the row keys come from the cell data.
  response = client.get(path, Constants.MIMETYPE_XML);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(404, response.getCode());
  // A plain concatenation replaces the original StringBuilder; same path string.
  String query = "/" + TABLE + "/" + "testrow*";
  response = client.get(query, Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  CellSetModel cellSet =
    (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
  assertTrue(cellSet.getRows().size() == 2);
  response = deleteRow(TABLE, ROW_1);
  assertEquals(200, response.getCode());
  response = deleteRow(TABLE, ROW_2);
  assertEquals(200, response.getCode());
}

Class: org.apache.hadoop.hbase.rest.TestGzipFilter

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Error responses (404 and 405) must not be gzip-encoded even when the client
 * sends Accept-Encoding: gzip.
 */
@Test
public void testErrorNotGzipped() throws Exception {
  Header[] headers = new Header[2];
  headers[0] = new Header("Accept", Constants.MIMETYPE_BINARY);
  headers[1] = new Header("Accept-Encoding", "gzip");
  // GET on a nonexistent cell: 404, body not gzipped.
  Response response = client.get("/" + TABLE + "/" + ROW_1 + "/" + COLUMN_2, headers);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(404, response.getCode());
  String contentEncoding = response.getHeader("Content-Encoding");
  assertTrue(contentEncoding == null || !contentEncoding.contains("gzip"));
  // GET on the bare table resource: 405, body not gzipped.
  response = client.get("/" + TABLE, headers);
  assertEquals(405, response.getCode());
  contentEncoding = response.getHeader("Content-Encoding");
  assertTrue(contentEncoding == null || !contentEncoding.contains("gzip"));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Writes a gzip-encoded cell through the REST gzip filter, verifies the stored
 * bytes are the decoded value, then reads the cell back with
 * Accept-Encoding: gzip and decompresses the response body.
 */
@Test
public void testGzipFilter() throws Exception {
  String path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
  // Build the gzipped request body.
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  GZIPOutputStream os = new GZIPOutputStream(bos);
  os.write(VALUE_1);
  os.close();
  byte[] value_1_gzip = bos.toByteArray();
  Header[] headers = new Header[2];
  headers[0] = new Header("Content-Type", Constants.MIMETYPE_BINARY);
  headers[1] = new Header("Content-Encoding", "gzip");
  Response response = client.put(path, headers, value_1_gzip);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(200, response.getCode());
  // The server must have stored the *decoded* value.
  Table table = TEST_UTIL.getConnection().getTable(TABLE);
  Get get = new Get(Bytes.toBytes(ROW_1));
  get.addColumn(Bytes.toBytes(CFA), Bytes.toBytes("1"));
  Result result = table.get(get);
  byte[] value = result.getValue(Bytes.toBytes(CFA), Bytes.toBytes("1"));
  assertNotNull(value);
  assertTrue(Bytes.equals(value, VALUE_1));
  // Read the cell back, asking for a gzipped response.
  headers[0] = new Header("Accept", Constants.MIMETYPE_BINARY);
  headers[1] = new Header("Accept-Encoding", "gzip");
  response = client.get(path, headers);
  assertEquals(200, response.getCode());
  ByteArrayInputStream bis = new ByteArrayInputStream(response.getBody());
  GZIPInputStream is = new GZIPInputStream(bis);
  value = new byte[VALUE_1.length];
  // InputStream.read may return fewer bytes than requested (the original
  // ignored the return value); loop until the buffer is full.
  int off = 0;
  while (off < value.length) {
    int n = is.read(value, off, value.length - off);
    assertTrue("Premature EOF in gzipped response body", n > 0);
    off += n;
  }
  assertTrue(Bytes.equals(value, VALUE_1));
  is.close();
  table.close();
  // NOTE(review): invokes another test method directly; kept to preserve the
  // original scanner-result-code coverage and ordering.
  testScannerResultCodes();
}

Class: org.apache.hadoop.hbase.rest.TestMultiRowResource

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Multiget over one existing and one missing row must return only the
 * existing row: missing rows are silently omitted from the result.
 */
@Test
public void testMultiCellGetJSONNotFound() throws IOException, JAXBException {
  String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
  StringBuilder path = new StringBuilder();
  path.append("/");
  path.append(TABLE);
  path.append("/multiget/?row=");
  path.append(ROW_1);
  path.append("&row=");
  path.append(ROW_2); // ROW_2 is never written, so it should not appear.
  client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1));
  Response response = client.get(path.toString(), Constants.MIMETYPE_JSON);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(200, response.getCode());
  ObjectMapper mapper =
    new JacksonProvider().locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
  CellSetModel cellSet = (CellSetModel) mapper.readValue(response.getBody(), CellSetModel.class);
  assertEquals(1, cellSet.getRows().size());
  assertEquals(ROW_1, Bytes.toString(cellSet.getRows().get(0).getKey()));
  assertEquals(VALUE_1, Bytes.toString(cellSet.getRows().get(0).getCells().get(0).getValue()));
  client.delete(row_5_url);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** Writes two rows and verifies a JSON multiget for both returns 200 + JSON. */
@Test
public void testMultiCellGetJSON() throws IOException, JAXBException {
  String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
  String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2;
  StringBuilder path = new StringBuilder();
  path.append("/");
  path.append(TABLE);
  path.append("/multiget/?row=");
  path.append(ROW_1);
  path.append("&row=");
  path.append(ROW_2);
  client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1));
  client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2));
  Response response = client.get(path.toString(), Constants.MIMETYPE_JSON);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
  client.delete(row_5_url);
  client.delete(row_6_url);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** Writes two rows and verifies an XML multiget for both returns 200 + XML. */
@Test
public void testMultiCellGetXML() throws IOException, JAXBException {
  String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
  String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2;
  StringBuilder path = new StringBuilder();
  path.append("/");
  path.append(TABLE);
  path.append("/multiget/?row=");
  path.append(ROW_1);
  path.append("&row=");
  path.append(ROW_2);
  client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1));
  client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2));
  Response response = client.get(path.toString(), Constants.MIMETYPE_XML);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  client.delete(row_5_url);
  client.delete(row_6_url);
}

Class: org.apache.hadoop.hbase.rest.TestNamespacesInstanceResource

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises namespace creation and deletion through the REST endpoint using a
 * protobuf payload and an empty ("no body") request, and verifies that the
 * {@code hbase.rest.readonly} switch blocks every mutating call.
 *
 * NOTE(review): the initial PUTs return 403 even before read-only mode is
 * enabled — presumably because PUT (alter) is rejected for namespaces that do
 * not yet exist; confirm against the namespaces resource handler.
 */
@Test public void testNamespaceCreateAndDeletePBAndNoBody() throws IOException, JAXBException {
  String namespacePath3="/namespaces/" + NAMESPACE3;
  String namespacePath4="/namespaces/" + NAMESPACE4;
  NamespacesInstanceModel model3;
  NamespacesInstanceModel model4;
  Response response;
  Admin admin=TEST_UTIL.getHBaseAdmin();
  // Neither namespace may exist before the test starts.
  assertNull(findNamespace(admin,NAMESPACE3));
  assertNull(findNamespace(admin,NAMESPACE4));
  model3=testNamespacesInstanceModel.buildTestModel(NAMESPACE3,NAMESPACE3_PROPS);
  testNamespacesInstanceModel.checkModel(model3,NAMESPACE3,NAMESPACE3_PROPS);
  model4=testNamespacesInstanceModel.buildTestModel(NAMESPACE4,NAMESPACE4_PROPS);
  testNamespacesInstanceModel.checkModel(model4,NAMESPACE4,NAMESPACE4_PROPS);
  // PUT (alter) against the not-yet-created namespaces is rejected with 403.
  response=client.put(namespacePath3,Constants.MIMETYPE_BINARY,new byte[]{});
  assertEquals(403,response.getCode());
  response=client.put(namespacePath4,Constants.MIMETYPE_PROTOBUF,model4.createProtobufOutput());
  assertEquals(403,response.getCode());
  // In read-only mode, creation attempts must also be rejected.
  conf.set("hbase.rest.readonly","true");
  response=client.post(namespacePath3,Constants.MIMETYPE_BINARY,new byte[]{});
  assertEquals(403,response.getCode());
  response=client.put(namespacePath4,Constants.MIMETYPE_PROTOBUF,model4.createProtobufOutput());
  assertEquals(403,response.getCode());
  // Nothing was created by the rejected requests.
  NamespaceDescriptor nd3=findNamespace(admin,NAMESPACE3);
  NamespaceDescriptor nd4=findNamespace(admin,NAMESPACE4);
  assertNull(nd3);
  assertNull(nd4);
  // With read-only off, POST creates both namespaces (201 Created).
  conf.set("hbase.rest.readonly","false");
  response=client.post(namespacePath3,Constants.MIMETYPE_BINARY,new byte[]{});
  assertEquals(201,response.getCode());
  response=client.post(namespacePath4,Constants.MIMETYPE_PROTOBUF,model4.createProtobufOutput());
  assertEquals(201,response.getCode());
  nd3=findNamespace(admin,NAMESPACE3);
  nd4=findNamespace(admin,NAMESPACE4);
  assertNotNull(nd3);
  assertNotNull(nd4);
  // The no-body create yields a namespace with no properties; the protobuf
  // create carries NAMESPACE4_PROPS.
  checkNamespaceProperties(nd3,new HashMap());
  checkNamespaceProperties(nd4,NAMESPACE4_PROPS);
  // Re-creating an existing namespace is rejected.
  response=client.post(namespacePath3,Constants.MIMETYPE_BINARY,new byte[]{});
  assertEquals(403,response.getCode());
  response=client.post(namespacePath4,Constants.MIMETYPE_PROTOBUF,model4.createProtobufOutput());
  assertEquals(403,response.getCode());
  // Deletion is blocked while read-only; both namespaces survive.
  conf.set("hbase.rest.readonly","true");
  response=client.delete(namespacePath3);
  assertEquals(403,response.getCode());
  response=client.delete(namespacePath4);
  assertEquals(403,response.getCode());
  nd3=findNamespace(admin,NAMESPACE3);
  nd4=findNamespace(admin,NAMESPACE4);
  assertNotNull(nd3);
  assertNotNull(nd4);
  // Deletion succeeds once read-only is cleared; both namespaces are gone.
  conf.set("hbase.rest.readonly","false");
  response=client.delete(namespacePath3);
  assertEquals(200,response.getCode());
  response=client.delete(namespacePath4);
  assertEquals(200,response.getCode());
  nd3=findNamespace(admin,NAMESPACE3);
  nd4=findNamespace(admin,NAMESPACE4);
  assertNull(nd3);
  assertNull(nd4);
}

InternalCallVerifier EqualityVerifier 
/**
 * Creates a namespace with two tables via the Admin API, then reads the
 * namespace properties and its table list through the REST endpoint in every
 * supported format (default, XML, JSON, protobuf), and finally verifies that
 * a non-empty namespace cannot be deleted (503).
 *
 * NOTE(review): the final DELETE is issued against the ".../tables" path (the
 * reassignment of namespacePath happens after the call) — presumably the
 * handler treats it as a namespace delete; confirm against the resource.
 */
@Test public void testGetNamespaceTablesAndCannotDeleteNamespace() throws IOException, JAXBException {
  Admin admin=TEST_UTIL.getHBaseAdmin();
  String nsName="TestNamespacesInstanceResource5";
  Response response;
  // Create the namespace with one configuration property.
  NamespaceDescriptor.Builder nsBuilder=NamespaceDescriptor.create(nsName);
  NamespaceDescriptor nsd=nsBuilder.build();
  nsd.setConfiguration("key1","value1");
  admin.createNamespace(nsd);
  // Create two tables inside the namespace, sharing one column family.
  HColumnDescriptor colDesc=new HColumnDescriptor("cf1");
  TableName tn1=TableName.valueOf(nsName + ":table1");
  HTableDescriptor table=new HTableDescriptor(tn1);
  table.addFamily(colDesc);
  admin.createTable(table);
  TableName tn2=TableName.valueOf(nsName + ":table2");
  table=new HTableDescriptor(tn2);
  table.addFamily(colDesc);
  admin.createTable(table);
  // Expected state to compare REST responses against.
  Map nsProperties=new HashMap();
  nsProperties.put("key1","value1");
  List nsTables=Arrays.asList("table1","table2");
  // Namespace properties via default, XML, JSON and protobuf representations.
  String namespacePath="/namespaces/" + nsName;
  response=client.get(namespacePath);
  assertEquals(200,response.getCode());
  response=client.get(namespacePath,Constants.MIMETYPE_XML);
  assertEquals(200,response.getCode());
  NamespacesInstanceModel model=fromXML(response.getBody());
  checkNamespaceProperties(model.getProperties(),nsProperties);
  response=client.get(namespacePath,Constants.MIMETYPE_JSON);
  assertEquals(200,response.getCode());
  model=jsonMapper.readValue(response.getBody(),NamespacesInstanceModel.class);
  checkNamespaceProperties(model.getProperties(),nsProperties);
  response=client.get(namespacePath,Constants.MIMETYPE_PROTOBUF);
  assertEquals(200,response.getCode());
  model.getObjectFromMessage(response.getBody());
  checkNamespaceProperties(model.getProperties(),nsProperties);
  // Table list for the namespace, again in each representation.
  namespacePath="/namespaces/" + nsName + "/tables";
  response=client.get(namespacePath);
  assertEquals(200,response.getCode());
  response=client.get(namespacePath,Constants.MIMETYPE_XML);
  assertEquals(200,response.getCode());
  TableListModel tablemodel=fromXML(response.getBody());
  checkNamespaceTables(tablemodel.getTables(),nsTables);
  response=client.get(namespacePath,Constants.MIMETYPE_JSON);
  assertEquals(200,response.getCode());
  tablemodel=jsonMapper.readValue(response.getBody(),TableListModel.class);
  checkNamespaceTables(tablemodel.getTables(),nsTables);
  response=client.get(namespacePath,Constants.MIMETYPE_PROTOBUF);
  assertEquals(200,response.getCode());
  // Reset the model's table list before decoding the protobuf message into it.
  tablemodel.setTables(new ArrayList());
  tablemodel.getObjectFromMessage(response.getBody());
  checkNamespaceTables(tablemodel.getTables(),nsTables);
  // Deleting a namespace that still contains tables must fail with 503.
  response=client.delete(namespacePath);
  namespacePath="/namespaces/" + nsName;
  assertEquals(503,response.getCode());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Sends namespace create/alter requests whose declared content type does not
 * match the actual body encoding and verifies they are rejected (500 for an
 * unparseable body, 400 for a malformed request) without creating anything.
 */
@Test public void testInvalidNamespacePostsAndPuts() throws IOException, JAXBException {
  String namespacePath1="/namespaces/" + NAMESPACE1;
  String namespacePath2="/namespaces/" + NAMESPACE2;
  String namespacePath3="/namespaces/" + NAMESPACE3;
  NamespacesInstanceModel model1;
  NamespacesInstanceModel model2;
  NamespacesInstanceModel model3;
  Response response;
  Admin admin=TEST_UTIL.getHBaseAdmin();
  // None of the namespaces may exist before the test starts.
  assertNull(findNamespace(admin,NAMESPACE1));
  assertNull(findNamespace(admin,NAMESPACE2));
  assertNull(findNamespace(admin,NAMESPACE3));
  model1=testNamespacesInstanceModel.buildTestModel(NAMESPACE1,NAMESPACE1_PROPS);
  testNamespacesInstanceModel.checkModel(model1,NAMESPACE1,NAMESPACE1_PROPS);
  model2=testNamespacesInstanceModel.buildTestModel(NAMESPACE2,NAMESPACE2_PROPS);
  testNamespacesInstanceModel.checkModel(model2,NAMESPACE2,NAMESPACE2_PROPS);
  model3=testNamespacesInstanceModel.buildTestModel(NAMESPACE3,NAMESPACE3_PROPS);
  testNamespacesInstanceModel.checkModel(model3,NAMESPACE3,NAMESPACE3_PROPS);
  // XML body declared as JSON -> server-side parse failure (500).
  response=client.post(namespacePath1,Constants.MIMETYPE_JSON,toXML(model1));
  assertEquals(500,response.getCode());
  // JSON body declared as XML -> bad request (400).
  String jsonString=jsonMapper.writeValueAsString(model2);
  response=client.put(namespacePath2,Constants.MIMETYPE_XML,Bytes.toBytes(jsonString));
  assertEquals(400,response.getCode());
  // XML body declared as protobuf -> server-side parse failure (500).
  response=client.post(namespacePath3,Constants.MIMETYPE_PROTOBUF,toXML(model1));
  assertEquals(500,response.getCode());
  // None of the malformed requests may have created a namespace.
  NamespaceDescriptor nd1=findNamespace(admin,NAMESPACE1);
  NamespaceDescriptor nd2=findNamespace(admin,NAMESPACE2);
  NamespaceDescriptor nd3=findNamespace(admin,NAMESPACE3);
  assertNull(nd1);
  assertNull(nd2);
  assertNull(nd3);
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises namespace creation and deletion via XML and JSON payloads,
 * verifying the {@code hbase.rest.readonly} switch blocks all mutations.
 *
 * Bug fix: the original checked {@code nd1}/{@code NAMESPACE1_PROPS} twice
 * (copy-paste duplicate) and never validated NAMESPACE2's properties; the
 * second check now covers {@code nd2}/{@code NAMESPACE2_PROPS}.
 */
@Test
public void testNamespaceCreateAndDeleteXMLAndJSON() throws IOException, JAXBException {
  String namespacePath1 = "/namespaces/" + NAMESPACE1;
  String namespacePath2 = "/namespaces/" + NAMESPACE2;
  NamespacesInstanceModel model1;
  NamespacesInstanceModel model2;
  Response response;
  Admin admin = TEST_UTIL.getHBaseAdmin();
  // Neither namespace may exist before the test starts.
  assertNull(findNamespace(admin, NAMESPACE1));
  assertNull(findNamespace(admin, NAMESPACE2));
  model1 = testNamespacesInstanceModel.buildTestModel(NAMESPACE1, NAMESPACE1_PROPS);
  testNamespacesInstanceModel.checkModel(model1, NAMESPACE1, NAMESPACE1_PROPS);
  model2 = testNamespacesInstanceModel.buildTestModel(NAMESPACE2, NAMESPACE2_PROPS);
  testNamespacesInstanceModel.checkModel(model2, NAMESPACE2, NAMESPACE2_PROPS);
  // PUT (alter) against the not-yet-created namespaces is rejected.
  response = client.put(namespacePath1, Constants.MIMETYPE_XML, toXML(model1));
  assertEquals(403, response.getCode());
  String jsonString = jsonMapper.writeValueAsString(model2);
  response = client.put(namespacePath2, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString));
  assertEquals(403, response.getCode());
  // In read-only mode, creation attempts must also be rejected.
  conf.set("hbase.rest.readonly", "true");
  response = client.post(namespacePath1, Constants.MIMETYPE_XML, toXML(model1));
  assertEquals(403, response.getCode());
  jsonString = jsonMapper.writeValueAsString(model2);
  response = client.post(namespacePath2, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString));
  assertEquals(403, response.getCode());
  NamespaceDescriptor nd1 = findNamespace(admin, NAMESPACE1);
  NamespaceDescriptor nd2 = findNamespace(admin, NAMESPACE2);
  assertNull(nd1);
  assertNull(nd2);
  // With read-only off, POST creates both namespaces (201 Created).
  conf.set("hbase.rest.readonly", "false");
  response = client.post(namespacePath1, Constants.MIMETYPE_XML, toXML(model1));
  assertEquals(201, response.getCode());
  jsonString = jsonMapper.writeValueAsString(model2);
  response = client.post(namespacePath2, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString));
  assertEquals(201, response.getCode());
  nd1 = findNamespace(admin, NAMESPACE1);
  nd2 = findNamespace(admin, NAMESPACE2);
  assertNotNull(nd1);
  assertNotNull(nd2);
  checkNamespaceProperties(nd1, NAMESPACE1_PROPS);
  checkNamespaceProperties(nd2, NAMESPACE2_PROPS); // was a duplicate nd1 check
  // Deletion is blocked while read-only; both namespaces survive.
  conf.set("hbase.rest.readonly", "true");
  response = client.delete(namespacePath1);
  assertEquals(403, response.getCode());
  response = client.delete(namespacePath2);
  assertEquals(403, response.getCode());
  nd1 = findNamespace(admin, NAMESPACE1);
  nd2 = findNamespace(admin, NAMESPACE2);
  assertNotNull(nd1);
  assertNotNull(nd2);
  // Deletion succeeds once read-only is cleared; both namespaces are gone.
  conf.set("hbase.rest.readonly", "false");
  response = client.delete(namespacePath1);
  assertEquals(200, response.getCode());
  response = client.delete(namespacePath2);
  assertEquals(200, response.getCode());
  nd1 = findNamespace(admin, NAMESPACE1);
  nd2 = findNamespace(admin, NAMESPACE2);
  assertNull(nd1);
  assertNull(nd2);
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** The reserved 'default' and 'hbase' namespaces must survive DELETE attempts. */
@Test
public void testCannotDeleteDefaultAndHbaseNamespaces() throws IOException {
  final String defaultPath = "/namespaces/default";
  final String hbasePath = "/namespaces/hbase";
  Admin admin = TEST_UTIL.getHBaseAdmin();
  // Both reserved namespaces exist up front.
  assertNotNull(findNamespace(admin, "default"));
  assertNotNull(findNamespace(admin, "hbase"));
  // DELETE is refused with 503 for each of them...
  Response response = client.delete(defaultPath);
  assertEquals(503, response.getCode());
  response = client.delete(hbasePath);
  assertEquals(503, response.getCode());
  // ...and both are still present afterwards.
  assertNotNull(findNamespace(admin, "default"));
  assertNotNull(findNamespace(admin, "hbase"));
}

Class: org.apache.hadoop.hbase.rest.TestNamespacesResource

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Lists namespaces via the protobuf and default representations, re-checking
 * the list after each admin-side create/delete of NAMESPACE1 and NAMESPACE2
 * (the built-in 'hbase' and 'default' namespaces are always present).
 */
@Test public void testNamespaceListPBandDefault() throws IOException, JAXBException {
  String schemaPath="/namespaces/";
  NamespacesModel model;
  Response response;
  Admin admin=TEST_UTIL.getHBaseAdmin();
  assertFalse(doesNamespaceExist(admin,NAMESPACE1));
  model=testNamespacesModel.buildTestModel();
  testNamespacesModel.checkModel(model);
  // Initially only the built-in namespaces are listed.
  response=client.get(schemaPath,Constants.MIMETYPE_PROTOBUF);
  assertEquals(200,response.getCode());
  model.getObjectFromMessage(response.getBody());
  testNamespacesModel.checkModel(model,"hbase","default");
  response=client.get(schemaPath);
  assertEquals(200,response.getCode());
  // After creating NAMESPACE1 it appears in the list.
  createNamespaceViaAdmin(admin,NAMESPACE1);
  response=client.get(schemaPath,Constants.MIMETYPE_PROTOBUF);
  assertEquals(200,response.getCode());
  model.getObjectFromMessage(response.getBody());
  testNamespacesModel.checkModel(model,NAMESPACE1,"hbase","default");
  response=client.get(schemaPath);
  assertEquals(200,response.getCode());
  // After creating NAMESPACE2 both custom namespaces are listed.
  createNamespaceViaAdmin(admin,NAMESPACE2);
  response=client.get(schemaPath,Constants.MIMETYPE_PROTOBUF);
  assertEquals(200,response.getCode());
  model.getObjectFromMessage(response.getBody());
  testNamespacesModel.checkModel(model,NAMESPACE1,NAMESPACE2,"hbase","default");
  response=client.get(schemaPath);
  assertEquals(200,response.getCode());
  // Deleting NAMESPACE1 removes it from the list.
  admin.deleteNamespace(NAMESPACE1);
  response=client.get(schemaPath,Constants.MIMETYPE_PROTOBUF);
  assertEquals(200,response.getCode());
  model.getObjectFromMessage(response.getBody());
  testNamespacesModel.checkModel(model,NAMESPACE2,"hbase","default");
  response=client.get(schemaPath);
  assertEquals(200,response.getCode());
  // Clean up the remaining test namespace.
  admin.deleteNamespace(NAMESPACE2);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Lists namespaces via the XML and JSON representations, re-checking the list
 * after each admin-side create/delete of NAMESPACE1 and NAMESPACE2 (the
 * built-in 'hbase' and 'default' namespaces are always present).
 */
@Test public void testNamespaceListXMLandJSON() throws IOException, JAXBException {
  String namespacePath="/namespaces/";
  NamespacesModel model;
  Response response;
  Admin admin=TEST_UTIL.getHBaseAdmin();
  assertFalse(doesNamespaceExist(admin,NAMESPACE1));
  model=testNamespacesModel.buildTestModel();
  testNamespacesModel.checkModel(model);
  // Initially only the built-in namespaces are listed.
  response=client.get(namespacePath,Constants.MIMETYPE_XML);
  assertEquals(200,response.getCode());
  model=fromXML(response.getBody());
  testNamespacesModel.checkModel(model,"hbase","default");
  response=client.get(namespacePath,Constants.MIMETYPE_JSON);
  assertEquals(200,response.getCode());
  model=testNamespacesModel.fromJSON(Bytes.toString(response.getBody()));
  testNamespacesModel.checkModel(model,"hbase","default");
  // After creating NAMESPACE1 it appears in both representations.
  createNamespaceViaAdmin(admin,NAMESPACE1);
  response=client.get(namespacePath,Constants.MIMETYPE_XML);
  assertEquals(200,response.getCode());
  model=fromXML(response.getBody());
  testNamespacesModel.checkModel(model,NAMESPACE1,"hbase","default");
  response=client.get(namespacePath,Constants.MIMETYPE_JSON);
  assertEquals(200,response.getCode());
  model=testNamespacesModel.fromJSON(Bytes.toString(response.getBody()));
  testNamespacesModel.checkModel(model,NAMESPACE1,"hbase","default");
  // After creating NAMESPACE2 both custom namespaces are listed.
  createNamespaceViaAdmin(admin,NAMESPACE2);
  response=client.get(namespacePath,Constants.MIMETYPE_XML);
  assertEquals(200,response.getCode());
  model=fromXML(response.getBody());
  testNamespacesModel.checkModel(model,NAMESPACE1,NAMESPACE2,"hbase","default");
  response=client.get(namespacePath,Constants.MIMETYPE_JSON);
  assertEquals(200,response.getCode());
  model=testNamespacesModel.fromJSON(Bytes.toString(response.getBody()));
  testNamespacesModel.checkModel(model,NAMESPACE1,NAMESPACE2,"hbase","default");
  // Deleting NAMESPACE1 removes it from both representations.
  admin.deleteNamespace(NAMESPACE1);
  response=client.get(namespacePath,Constants.MIMETYPE_XML);
  assertEquals(200,response.getCode());
  model=fromXML(response.getBody());
  testNamespacesModel.checkModel(model,NAMESPACE2,"hbase","default");
  response=client.get(namespacePath,Constants.MIMETYPE_JSON);
  assertEquals(200,response.getCode());
  model=testNamespacesModel.fromJSON(Bytes.toString(response.getBody()));
  testNamespacesModel.checkModel(model,NAMESPACE2,"hbase","default");
  // Clean up the remaining test namespace.
  admin.deleteNamespace(NAMESPACE2);
}

Class: org.apache.hadoop.hbase.rest.TestResourceFilter

InternalCallVerifier EqualityVerifier 
/** The configured resource filter must hide /status/cluster (404). */
@Test
public void testFilter() throws Exception {
  final String clusterStatusPath = "/status/cluster";
  Response res = client.get(clusterStatusPath);
  assertEquals(404, res.getCode());
}

Class: org.apache.hadoop.hbase.rest.TestScannerResource

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Creates an XML-defined scanner (batch 5), reads one batch, and deletes it,
 * verifying read-only mode blocks both creation and deletion.
 */
@Test
public void testSimpleScannerXML() throws IOException, JAXBException {
  final int BATCH_SIZE = 5;
  ScannerModel model = new ScannerModel();
  model.setBatch(BATCH_SIZE);
  model.addColumn(Bytes.toBytes(COLUMN_1));
  StringWriter writer = new StringWriter();
  marshaller.marshal(model, writer);
  byte[] body = Bytes.toBytes(writer.toString());
  // Scanner creation is a mutation: rejected while read-only.
  conf.set("hbase.rest.readonly", "true");
  Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(403, response.getCode());
  String scannerURI = response.getLocation();
  assertNull(scannerURI);
  conf.set("hbase.rest.readonly", "false");
  response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body);
  assertEquals(201, response.getCode());
  scannerURI = response.getLocation();
  assertNotNull(scannerURI);
  // Fetch one batch and check it contains exactly BATCH_SIZE cells.
  response = client.get(scannerURI, Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  CellSetModel cellSet =
    (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
  assertEquals(BATCH_SIZE, countCellSet(cellSet));
  // Scanner deletion is likewise blocked in read-only mode.
  conf.set("hbase.rest.readonly", "true");
  response = client.delete(scannerURI);
  assertEquals(403, response.getCode());
  conf.set("hbase.rest.readonly", "false");
  response = client.delete(scannerURI);
  assertEquals(200, response.getCode());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Creates a protobuf-defined scanner (batch 1), fetches one cell as binary,
 * and checks the X-Row / X-Column / X-Timestamp headers; read-only mode must
 * block both scanner creation and deletion.
 */
@Test
public void testSimpleScannerBinary() throws IOException {
  ScannerModel model = new ScannerModel();
  model.setBatch(1);
  model.addColumn(Bytes.toBytes(COLUMN_1));
  // Scanner creation is a mutation: rejected while read-only.
  conf.set("hbase.rest.readonly", "true");
  Response response =
    client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(403, response.getCode());
  String scannerURI = response.getLocation();
  assertNull(scannerURI);
  conf.set("hbase.rest.readonly", "false");
  response =
    client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
  assertEquals(201, response.getCode());
  scannerURI = response.getLocation();
  assertNotNull(scannerURI);
  // One binary cell: value in the body, coordinates in headers.
  response = client.get(scannerURI, Constants.MIMETYPE_BINARY);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type"));
  assertTrue(response.getBody().length > 0);
  boolean foundRowHeader = false, foundColumnHeader = false, foundTimestampHeader = false;
  for (Header header : response.getHeaders()) {
    if (header.getName().equals("X-Row")) {
      foundRowHeader = true;
    } else if (header.getName().equals("X-Column")) {
      foundColumnHeader = true;
    } else if (header.getName().equals("X-Timestamp")) {
      foundTimestampHeader = true;
    }
  }
  assertTrue(foundRowHeader);
  assertTrue(foundColumnHeader);
  assertTrue(foundTimestampHeader);
  // Scanner deletion is likewise blocked in read-only mode.
  conf.set("hbase.rest.readonly", "true");
  response = client.delete(scannerURI);
  assertEquals(403, response.getCode());
  conf.set("hbase.rest.readonly", "false");
  response = client.delete(scannerURI);
  assertEquals(200, response.getCode());
}

InternalCallVerifier EqualityVerifier 
/** Full scans over each column must return the expected per-column row counts. */
@Test
public void testFullTableScan() throws IOException {
  ScannerModel model = new ScannerModel();
  model.addColumn(Bytes.toBytes(COLUMN_1));
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(expectedRows1, fullTableScan(model));
  model = new ScannerModel();
  model.addColumn(Bytes.toBytes(COLUMN_2));
  assertEquals(expectedRows2, fullTableScan(model));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** A scanner PUT against a nonexistent table must return 404. */
@Test
public void testTableDoesNotExist() throws IOException, JAXBException {
  ScannerModel model = new ScannerModel();
  StringWriter writer = new StringWriter();
  marshaller.marshal(model, writer);
  byte[] body = Bytes.toBytes(writer.toString());
  Response response =
    client.put("/" + NONEXISTENT_TABLE + "/scanner", Constants.MIMETYPE_XML, body);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(404, response.getCode());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Creates a protobuf-defined scanner (batch 10), reads one batch as protobuf,
 * and deletes it; read-only mode must block creation and deletion.
 */
@Test
public void testSimpleScannerPB() throws IOException {
  final int BATCH_SIZE = 10;
  ScannerModel model = new ScannerModel();
  model.setBatch(BATCH_SIZE);
  model.addColumn(Bytes.toBytes(COLUMN_1));
  // Scanner creation is a mutation: rejected while read-only.
  conf.set("hbase.rest.readonly", "true");
  Response response =
    client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(403, response.getCode());
  String scannerURI = response.getLocation();
  assertNull(scannerURI);
  conf.set("hbase.rest.readonly", "false");
  response =
    client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
  assertEquals(201, response.getCode());
  scannerURI = response.getLocation();
  assertNotNull(scannerURI);
  // Fetch one batch and check it contains exactly BATCH_SIZE cells.
  response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
  CellSetModel cellSet = new CellSetModel();
  cellSet.getObjectFromMessage(response.getBody());
  assertEquals(BATCH_SIZE, countCellSet(cellSet));
  // Scanner deletion is likewise blocked in read-only mode.
  conf.set("hbase.rest.readonly", "true");
  response = client.delete(scannerURI);
  assertEquals(403, response.getCode());
  conf.set("hbase.rest.readonly", "false");
  response = client.delete(scannerURI);
  assertEquals(200, response.getCode());
}

Class: org.apache.hadoop.hbase.rest.TestScannersWithLabels

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A scanner carrying the SECRET visibility label must see the five cells the
 * label authorizes.
 */
@Test
public void testSimpleScannerXMLWithLabelsThatReceivesData() throws IOException, JAXBException {
  ScannerModel model = new ScannerModel();
  model.setBatch(5);
  model.addColumn(Bytes.toBytes(COLUMN_1));
  model.addLabel(SECRET);
  StringWriter writer = new StringWriter();
  marshaller.marshal(model, writer);
  byte[] body = Bytes.toBytes(writer.toString());
  conf.set("hbase.rest.readonly", "false");
  Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(201, response.getCode());
  String scannerURI = response.getLocation();
  assertNotNull(scannerURI);
  response = client.get(scannerURI, Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  CellSetModel cellSet =
    (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
  assertEquals(5, countCellSet(cellSet));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A scanner carrying the unauthorized PUBLIC label must see no cells: the
 * batch fetch returns 204 No Content.
 */
@Test
public void testSimpleScannerXMLWithLabelsThatReceivesNoData() throws IOException, JAXBException {
  final int BATCH_SIZE = 5;
  ScannerModel model = new ScannerModel();
  model.setBatch(BATCH_SIZE);
  model.addColumn(Bytes.toBytes(COLUMN_1));
  model.addLabel(PUBLIC);
  StringWriter writer = new StringWriter();
  marshaller.marshal(model, writer);
  byte[] body = Bytes.toBytes(writer.toString());
  conf.set("hbase.rest.readonly", "false");
  Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body);
  // JUnit convention: expected value first, actual second (fixes reversed order).
  assertEquals(201, response.getCode());
  String scannerURI = response.getLocation();
  assertNotNull(scannerURI);
  response = client.get(scannerURI, Constants.MIMETYPE_XML);
  assertEquals(204, response.getCode());
  // NOTE(review): asserting a content-type on a 204 (no content) response —
  // the REST server apparently still sets it; confirm if this ever flakes.
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
}

Class: org.apache.hadoop.hbase.rest.TestSchemaResource

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises table schema create/read/delete via the REST protobuf
 * representation, including read-only-mode rejections (403).
 */
@Test
public void testTableCreateAndDeletePB() throws IOException, JAXBException {
  String schemaPath = "/" + TABLE2 + "/schema";
  TableSchemaModel model;
  Response response;
  Admin admin = TEST_UTIL.getHBaseAdmin();
  assertFalse(admin.tableExists(TableName.valueOf(TABLE2)));
  // Create the table with a protobuf-encoded schema PUT.
  model = testTableSchemaModel.buildTestModel(TABLE2);
  testTableSchemaModel.checkModel(model, TABLE2);
  response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(201, response.getCode());
  // In read-only mode the same PUT must be rejected.
  conf.set("hbase.rest.readonly", "true");
  response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
  assertEquals(403, response.getCode());
  // Reads still work in read-only mode; round-trip the schema in both encodings.
  response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
  model = new TableSchemaModel();
  model.getObjectFromMessage(response.getBody());
  testTableSchemaModel.checkModel(model, TABLE2);
  response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF_IETF);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
  model = new TableSchemaModel();
  model.getObjectFromMessage(response.getBody());
  testTableSchemaModel.checkModel(model, TABLE2);
  // DELETE is rejected while read-only, then succeeds once writes are re-enabled.
  response = client.delete(schemaPath);
  assertEquals(403, response.getCode());
  conf.set("hbase.rest.readonly", "false");
  response = client.delete(schemaPath);
  assertEquals(200, response.getCode());
  assertFalse(admin.tableExists(TableName.valueOf(TABLE2)));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises table schema create/read/delete via the REST XML and JSON
 * representations, including read-only-mode rejections (403).
 */
@Test
public void testTableCreateAndDeleteXML() throws IOException, JAXBException {
  String schemaPath = "/" + TABLE1 + "/schema";
  TableSchemaModel model;
  Response response;
  Admin admin = TEST_UTIL.getHBaseAdmin();
  assertFalse(admin.tableExists(TableName.valueOf(TABLE1)));
  // Create the table with an XML-encoded schema PUT.
  model = testTableSchemaModel.buildTestModel(TABLE1);
  testTableSchemaModel.checkModel(model, TABLE1);
  response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model));
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(201, response.getCode());
  // In read-only mode the same PUT must be rejected.
  conf.set("hbase.rest.readonly", "true");
  response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model));
  assertEquals(403, response.getCode());
  // Reads still work in read-only mode; round-trip the schema as XML and JSON.
  response = client.get(schemaPath, Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  model = fromXML(response.getBody());
  testTableSchemaModel.checkModel(model, TABLE1);
  response = client.get(schemaPath, Constants.MIMETYPE_JSON);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
  model = testTableSchemaModel.fromJSON(Bytes.toString(response.getBody()));
  testTableSchemaModel.checkModel(model, TABLE1);
  // DELETE is rejected while read-only, then succeeds once writes are re-enabled.
  response = client.delete(schemaPath);
  assertEquals(403, response.getCode());
  conf.set("hbase.rest.readonly", "false");
  response = client.delete(schemaPath);
  assertEquals(200, response.getCode());
  assertFalse(admin.tableExists(TableName.valueOf(TABLE1)));
}

Class: org.apache.hadoop.hbase.rest.TestStatusResource

InternalCallVerifier EqualityVerifier 
/** Fetches /status/cluster as XML and validates the unmarshalled model. */
@Test
public void testGetClusterStatusXML() throws IOException, JAXBException {
  Response response = client.get("/status/cluster", Constants.MIMETYPE_XML);
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  StorageClusterStatusModel model = (StorageClusterStatusModel) context.createUnmarshaller()
      .unmarshal(new ByteArrayInputStream(response.getBody()));
  validate(model);
}

InternalCallVerifier EqualityVerifier 
/** Fetches /status/cluster in both protobuf encodings and validates each model. */
@Test
public void testGetClusterStatusPB() throws IOException {
  Response response = client.get("/status/cluster", Constants.MIMETYPE_PROTOBUF);
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
  StorageClusterStatusModel model = new StorageClusterStatusModel();
  model.getObjectFromMessage(response.getBody());
  validate(model);
  response = client.get("/status/cluster", Constants.MIMETYPE_PROTOBUF_IETF);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
  model = new StorageClusterStatusModel();
  model.getObjectFromMessage(response.getBody());
  validate(model);
}

Class: org.apache.hadoop.hbase.rest.TestTableResource

InternalCallVerifier EqualityVerifier 
/** Fetches the table list as plain text and checks status and content type. */
@Test
public void testTableListText() throws IOException {
  Response response = client.get("/", Constants.MIMETYPE_TEXT);
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
}

InternalCallVerifier EqualityVerifier 
/** Fetches the table list as XML and checks the unmarshalled model. */
@Test
public void testTableListXML() throws IOException, JAXBException {
  Response response = client.get("/", Constants.MIMETYPE_XML);
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  TableListModel model = (TableListModel) context.createUnmarshaller()
      .unmarshal(new ByteArrayInputStream(response.getBody()));
  checkTableList(model);
}

InternalCallVerifier EqualityVerifier 
/** Fetches table region info as plain text and checks status and content type. */
@Test
public void testTableInfoText() throws IOException {
  Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_TEXT);
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
}

InternalCallVerifier EqualityVerifier 
/** Fetches table region info in both protobuf encodings and checks each model. */
@Test
public void testTableInfoPB() throws IOException, JAXBException {
  Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_PROTOBUF);
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
  TableInfoModel model = new TableInfoModel();
  model.getObjectFromMessage(response.getBody());
  checkTableInfo(model);
  response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_PROTOBUF_IETF);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
  model = new TableInfoModel();
  model.getObjectFromMessage(response.getBody());
  checkTableInfo(model);
}

InternalCallVerifier EqualityVerifier 
/** Fetches table region info as JSON and checks status and content type. */
@Test
public void testTableInfoJSON() throws IOException {
  Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_JSON);
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
}

InternalCallVerifier EqualityVerifier 
/** Fetches the table list in both protobuf encodings and checks each model. */
@Test
public void testTableListPB() throws IOException, JAXBException {
  Response response = client.get("/", Constants.MIMETYPE_PROTOBUF);
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
  TableListModel model = new TableListModel();
  model.getObjectFromMessage(response.getBody());
  checkTableList(model);
  response = client.get("/", Constants.MIMETYPE_PROTOBUF_IETF);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
  model = new TableListModel();
  model.getObjectFromMessage(response.getBody());
  checkTableList(model);
}

InternalCallVerifier EqualityVerifier 
/** Fetches the table list as JSON and checks status and content type. */
@Test
public void testTableListJSON() throws IOException {
  Response response = client.get("/", Constants.MIMETYPE_JSON);
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
}

InternalCallVerifier EqualityVerifier 
/** Fetches table region info as XML and checks the unmarshalled model. */
@Test
public void testTableInfoXML() throws IOException, JAXBException {
  Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_XML);
  // JUnit convention: expected value first (original had the arguments swapped).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  TableInfoModel model = (TableInfoModel) context.createUnmarshaller()
      .unmarshal(new ByteArrayInputStream(response.getBody()));
  checkTableInfo(model);
}

Class: org.apache.hadoop.hbase.rest.TestTableScan

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Streams scan results as JSON: a limit-20 column scan, a full scan of the
 * second column, then a row-range scan [aaa, aay) parsed incrementally with
 * Jackson's streaming parser while checking the first ("aaa") and last
 * ("aax") row keys.
 */
@Test
public void testStreamingJSON() throws Exception {
  // Limited scan over COLUMN_1.
  StringBuilder builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_LIMIT + "=20");
  Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
  ObjectMapper mapper =
      new JacksonProvider().locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
  CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class);
  int count = TestScannerResource.countCellSet(model);
  assertEquals(20, count);
  checkRowsNotNull(model);
  // Full scan over COLUMN_2.
  builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2);
  response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
  model = mapper.readValue(response.getStream(), CellSetModel.class);
  count = TestScannerResource.countCellSet(model);
  assertEquals(expectedRows2, count);
  checkRowsNotNull(model);
  // Row-range scan [aaa, aay), consumed with the streaming JSON parser.
  builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_START_ROW + "=aaa");
  builder.append("&");
  builder.append(Constants.SCAN_END_ROW + "=aay");
  response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON);
  assertEquals(200, response.getCode());
  count = 0;
  JsonFactory jfactory = new JsonFactory(mapper);
  JsonParser jParser = jfactory.createJsonParser(response.getStream());
  boolean found = false;
  while (jParser.nextToken() != JsonToken.END_OBJECT) {
    if (jParser.getCurrentToken() == JsonToken.START_OBJECT && found) {
      RowModel row = jParser.readValueAs(RowModel.class);
      assertNotNull(row.getKey());
      // NOTE(review): count is incremented once per cell, not per row; the
      // "aaa"/"aax" key checks assume one cell per row in this range.
      for (int i = 0; i < row.getCells().size(); i++) {
        if (count == 0) {
          assertEquals("aaa", Bytes.toString(row.getKey()));
        }
        if (count == 23) {
          assertEquals("aax", Bytes.toString(row.getKey()));
        }
        count++;
      }
      jParser.skipChildren();
    } else {
      found = jParser.getCurrentToken() == JsonToken.START_ARRAY;
    }
  }
  assertEquals(24, count);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * A CustomFilter('abc') applied to rows starting with 'b' matches nothing,
 * so the scan must return an empty cell set.
 */
@Test
public void testNegativeCustomFilter() throws IOException, JAXBException {
  // Original created a StringBuilder and immediately reassigned it; build once.
  StringBuilder builder = new StringBuilder();
  builder.append("/b*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("CustomFilter('abc')", "UTF-8"));
  Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class);
  Unmarshaller ush = ctx.createUnmarshaller();
  CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream());
  int count = TestScannerResource.countCellSet(model);
  assertEquals(0, count);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** Scanning an unknown column qualifier must yield an empty JSON cell set. */
@Test
public void testScanningUnknownColumnJson() throws IOException, JAXBException {
  StringBuilder query = new StringBuilder();
  query.append("/*");
  query.append("?");
  query.append(Constants.SCAN_COLUMN + "=a:test");
  Response response = client.get("/" + TABLE + query.toString(), Constants.MIMETYPE_JSON);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
  ObjectMapper mapper =
      new JacksonProvider().locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
  CellSetModel cellSet = mapper.readValue(response.getStream(), CellSetModel.class);
  int cellCount = TestScannerResource.countCellSet(cellSet);
  assertEquals(0, cellCount);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Streams an XML scan through a SAX-driven JAXB unmarshaller, using an
 * {@code Unmarshaller.Listener} to hand each {@code RowModel} to a
 * client-side callback as it is parsed, and verifies the callback ran.
 */
@Test
public void testScanUsingListenerUnmarshallerXML() throws Exception {
  StringBuilder builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_LIMIT + "=10");
  Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  JAXBContext context =
      JAXBContext.newInstance(ClientSideCellSetModel.class, RowModel.class, CellModel.class);
  Unmarshaller unmarshaller = context.createUnmarshaller();
  // Row-level callback: every streamed row must have a key and at least one cell.
  final ClientSideCellSetModel.Listener listener = new ClientSideCellSetModel.Listener() {
    @Override
    public void handleRowModel(ClientSideCellSetModel helper, RowModel row) {
      assertTrue(row.getKey() != null);
      assertTrue(row.getCells().size() > 0);
    }
  };
  // Attach the row listener before unmarshalling the cell set, detach after.
  unmarshaller.setListener(new Unmarshaller.Listener() {
    public void beforeUnmarshal(Object target, Object parent) {
      if (target instanceof ClientSideCellSetModel) {
        ((ClientSideCellSetModel) target).setCellSetModelListener(listener);
      }
    }

    public void afterUnmarshal(Object target, Object parent) {
      if (target instanceof ClientSideCellSetModel) {
        ((ClientSideCellSetModel) target).setCellSetModelListener(null);
      }
    }
  });
  // Drive the unmarshaller from a namespace-aware SAX parser over the stream.
  SAXParserFactory factory = SAXParserFactory.newInstance();
  factory.setNamespaceAware(true);
  XMLReader reader = factory.newSAXParser().getXMLReader();
  reader.setContentHandler(unmarshaller.getUnmarshallerHandler());
  assertFalse(ClientSideCellSetModel.listenerInvoked);
  reader.parse(new InputSource(response.getStream()));
  assertTrue(ClientSideCellSetModel.listenerInvoked);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Protobuf scanner: a limit-15 scan over COLUMN_1, then a [aaa, aay) row
 * range scan expected to return 24 rows.
 */
@Test
public void testSimpleScannerProtobuf() throws Exception {
  StringBuilder query = new StringBuilder();
  query.append("/*");
  query.append("?");
  query.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  query.append("&");
  query.append(Constants.SCAN_LIMIT + "=15");
  Response response = client.get("/" + TABLE + query.toString(), Constants.MIMETYPE_PROTOBUF);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
  int rowCount = readProtobufStream(response.getStream());
  assertEquals(15, rowCount);
  query = new StringBuilder();
  query.append("/*");
  query.append("?");
  query.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  query.append("&");
  query.append(Constants.SCAN_START_ROW + "=aaa");
  query.append("&");
  query.append(Constants.SCAN_END_ROW + "=aay");
  response = client.get("/" + TABLE + query.toString(), Constants.MIMETYPE_PROTOBUF);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
  rowCount = readProtobufStream(response.getStream());
  assertEquals(24, rowCount);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * PrefixFilter('aab') within row range [aaa, aay) must match exactly the
 * single row "aab".
 */
@Test
public void testSimpleFilter() throws IOException, JAXBException {
  // Original created a StringBuilder and immediately reassigned it; build once.
  StringBuilder builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_START_ROW + "=aaa");
  builder.append("&");
  builder.append(Constants.SCAN_END_ROW + "=aay");
  builder.append("&");
  builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("PrefixFilter('aab')", "UTF-8"));
  Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class);
  Unmarshaller ush = ctx.createUnmarshaller();
  CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream());
  int count = TestScannerResource.countCellSet(model);
  assertEquals(1, count);
  assertEquals("aab", new String(model.getRows().get(0).getCells().get(0).getValue()));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * A compound filter (PrefixFilter AND QualifierFilter) must match exactly
 * the single row "abc".
 */
@Test
public void testCompoundFilter() throws IOException, JAXBException {
  // Original created a StringBuilder and immediately reassigned it; build once.
  StringBuilder builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_FILTER + "="
      + URLEncoder.encode("PrefixFilter('abc') AND QualifierFilter(=,'binary:1')", "UTF-8"));
  Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class);
  Unmarshaller ush = ctx.createUnmarshaller();
  CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream());
  int count = TestScannerResource.countCellSet(model);
  assertEquals(1, count);
  assertEquals("abc", new String(model.getRows().get(0).getCells().get(0).getValue()));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * JSON scanner: a limit-20 scan over COLUMN_1, a full scan over COLUMN_2,
 * then a [aaa, aay) range scan whose first and last row keys are checked.
 */
@Test
public void testSimpleScannerJson() throws IOException, JAXBException {
  // Limited scan over COLUMN_1.
  StringBuilder builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_LIMIT + "=20");
  Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
  ObjectMapper mapper =
      new JacksonProvider().locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE);
  CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class);
  int count = TestScannerResource.countCellSet(model);
  assertEquals(20, count);
  checkRowsNotNull(model);
  // Full scan over COLUMN_2.
  builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2);
  response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
  model = mapper.readValue(response.getStream(), CellSetModel.class);
  count = TestScannerResource.countCellSet(model);
  assertEquals(expectedRows2, count);
  checkRowsNotNull(model);
  // Row-range scan [aaa, aay): verify boundary keys and total cell count.
  builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_START_ROW + "=aaa");
  builder.append("&");
  builder.append(Constants.SCAN_END_ROW + "=aay");
  response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON);
  assertEquals(200, response.getCode());
  model = mapper.readValue(response.getStream(), CellSetModel.class);
  RowModel startRow = model.getRows().get(0);
  assertEquals("aaa", Bytes.toString(startRow.getKey()));
  RowModel endRow = model.getRows().get(model.getRows().size() - 1);
  assertEquals("aax", Bytes.toString(endRow.getKey()));
  count = TestScannerResource.countCellSet(model);
  assertEquals(24, count);
  checkRowsNotNull(model);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * XML scanner: a limit-10 scan, a full column scan, a [aaa, aay) range scan
 * with boundary-key checks, and a start-row + limit-15 scan.
 */
@Test
public void testSimpleScannerXML() throws IOException, JAXBException, XMLStreamException {
  // Limited scan over COLUMN_1.
  StringBuilder builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_LIMIT + "=10");
  Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class);
  Unmarshaller ush = ctx.createUnmarshaller();
  CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream());
  int count = TestScannerResource.countCellSet(model);
  assertEquals(10, count);
  checkRowsNotNull(model);
  // Full scan over COLUMN_1.
  builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  model = (CellSetModel) ush.unmarshal(response.getStream());
  count = TestScannerResource.countCellSet(model);
  assertEquals(expectedRows1, count);
  checkRowsNotNull(model);
  // Row-range scan [aaa, aay): verify boundary keys and total cell count.
  builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_START_ROW + "=aaa");
  builder.append("&");
  builder.append(Constants.SCAN_END_ROW + "=aay");
  response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  model = (CellSetModel) ush.unmarshal(response.getStream());
  count = TestScannerResource.countCellSet(model);
  RowModel startRow = model.getRows().get(0);
  assertEquals("aaa", Bytes.toString(startRow.getKey()));
  RowModel endRow = model.getRows().get(model.getRows().size() - 1);
  assertEquals("aax", Bytes.toString(endRow.getKey()));
  assertEquals(24, count);
  checkRowsNotNull(model);
  // Start-row plus limit: 15 results starting at "aaa".
  builder = new StringBuilder();
  builder.append("/*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_START_ROW + "=aaa");
  builder.append("&");
  builder.append(Constants.SCAN_LIMIT + "=15");
  response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  model = (CellSetModel) ush.unmarshal(response.getStream());
  startRow = model.getRows().get(0);
  assertEquals("aaa", Bytes.toString(startRow.getKey()));
  count = TestScannerResource.countCellSet(model);
  assertEquals(15, count);
  checkRowsNotNull(model);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * A CustomFilter('abc') applied to rows starting with 'a' must match exactly
 * the single row "abc".
 */
@Test
public void testCustomFilter() throws IOException, JAXBException {
  // Original created a StringBuilder and immediately reassigned it; build once.
  StringBuilder builder = new StringBuilder();
  builder.append("/a*");
  builder.append("?");
  builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1);
  builder.append("&");
  builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("CustomFilter('abc')", "UTF-8"));
  Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML);
  assertEquals(200, response.getCode());
  JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class);
  Unmarshaller ush = ctx.createUnmarshaller();
  CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream());
  int count = TestScannerResource.countCellSet(model);
  assertEquals(1, count);
  assertEquals("abc", new String(model.getRows().get(0).getCells().get(0).getValue()));
}

Class: org.apache.hadoop.hbase.rest.TestVersionResource

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Fetches /version/cluster as XML and checks a non-null version is returned. */
@Test
public void testGetStorageClusterVersionXML() throws IOException, JAXBException {
  Response response = client.get("/version/cluster", Constants.MIMETYPE_XML);
  // assertEquals reports the actual code on failure, unlike assertTrue(code == 200).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  StorageClusterVersionModel clusterVersionModel =
      (StorageClusterVersionModel) context.createUnmarshaller()
          .unmarshal(new ByteArrayInputStream(response.getBody()));
  assertNotNull(clusterVersionModel);
  assertNotNull(clusterVersionModel.getVersion());
  LOG.info("success retrieving storage cluster version as XML");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Fetches /version as JSON and checks status and content type. */
@Test
public void testGetStargateVersionJSON() throws IOException {
  Response response = client.get("/version", Constants.MIMETYPE_JSON);
  // assertEquals reports the actual code on failure, unlike assertTrue(code == 200).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Fetches /version as text and checks the body mentions the gateway version
 * plus the JVM, OS, and servlet-container details it is built from.
 */
@Test
public void testGetStargateVersionText() throws IOException {
  Response response = client.get("/version", Constants.MIMETYPE_TEXT);
  // assertEquals reports the actual code on failure, unlike assertTrue(code == 200).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
  String body = Bytes.toString(response.getBody());
  assertTrue(body.length() > 0);
  assertTrue(body.contains(RESTServlet.VERSION_STRING));
  assertTrue(body.contains(System.getProperty("java.vm.vendor")));
  assertTrue(body.contains(System.getProperty("java.version")));
  assertTrue(body.contains(System.getProperty("java.vm.version")));
  assertTrue(body.contains(System.getProperty("os.name")));
  assertTrue(body.contains(System.getProperty("os.version")));
  assertTrue(body.contains(System.getProperty("os.arch")));
  assertTrue(body.contains(ServletContainer.class.getPackage().getImplementationVersion()));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Fetches /version as XML and validates the unmarshalled VersionModel. */
@Test
public void testGetStargateVersionXML() throws IOException, JAXBException {
  Response response = client.get("/version", Constants.MIMETYPE_XML);
  // assertEquals reports the actual code on failure, unlike assertTrue(code == 200).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
  VersionModel model = (VersionModel) context.createUnmarshaller()
      .unmarshal(new ByteArrayInputStream(response.getBody()));
  validate(model);
  LOG.info("success retrieving Stargate version as XML");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Fetches /version/cluster as text and checks status and content type. */
@Test
public void testGetStorageClusterVersionText() throws IOException {
  Response response = client.get("/version/cluster", Constants.MIMETYPE_TEXT);
  // assertEquals reports the actual code on failure, unlike assertTrue(code == 200).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Fetches /version in both protobuf encodings and validates each model. */
@Test
public void testGetStargateVersionPB() throws IOException {
  Response response = client.get("/version", Constants.MIMETYPE_PROTOBUF);
  // assertEquals reports the actual code on failure, unlike assertTrue(code == 200).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
  VersionModel model = new VersionModel();
  model.getObjectFromMessage(response.getBody());
  validate(model);
  response = client.get("/version", Constants.MIMETYPE_PROTOBUF_IETF);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
  model = new VersionModel();
  model.getObjectFromMessage(response.getBody());
  validate(model);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Fetches /version/cluster as JSON and checks status and content type.
 * (Method name kept as-is to preserve the external interface.)
 */
@Test
public void doTestGetStorageClusterVersionJSON() throws IOException {
  Response response = client.get("/version/cluster", Constants.MIMETYPE_JSON);
  // assertEquals reports the actual code on failure, unlike assertTrue(code == 200).
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
}

Class: org.apache.hadoop.hbase.rest.client.TestRemoteTable

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * The table descriptor fetched through the REST gateway must equal the one
 * obtained from a direct cluster connection.
 */
@Test
public void testGetTableDescriptor() throws IOException {
  Table table = null;
  try {
    table = TEST_UTIL.getConnection().getTable(TABLE);
    HTableDescriptor local = table.getTableDescriptor();
    // JUnit convention: the reference (direct) descriptor is the expected value,
    // the REST-fetched one is the actual (original had the arguments swapped).
    assertEquals(local, remoteTable.getTableDescriptor());
  } finally {
    if (null != table) {
      table.close();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the Response accessors: status code, header lookup (including
 * header replacement via setHeaders), and body presence.
 */
@Test
public void testResponse() {
  Response response = new Response(200);
  assertEquals(200, response.getCode());
  Header[] headers = new Header[2];
  headers[0] = new Header("header1", "value1");
  headers[1] = new Header("header2", "value2");
  response = new Response(200, headers);
  assertEquals("value1", response.getHeader("header1"));
  assertFalse(response.hasBody());
  response.setCode(404);
  assertEquals(404, response.getCode());
  // Replacing the header array changes what getHeader resolves.
  headers = new Header[2];
  headers[0] = new Header("header1", "value1.1");
  headers[1] = new Header("header2", "value2");
  response.setHeaders(headers);
  assertEquals("value1.1", response.getHeader("header1"));
  response.setBody(Bytes.toBytes("body"));
  assertTrue(response.hasBody());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Puts four rows and verifies the remote scanner's Iterator visits all of
 * them. (Method name typo "Scaner" kept to preserve the external interface.)
 */
@Test
public void testIteratorScaner() throws IOException {
  // Raw List/Iterator in the original; use the parameterized types.
  List<Put> puts = new ArrayList<Put>();
  Put put = new Put(ROW_1);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  puts.add(put);
  put = new Put(ROW_2);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  puts.add(put);
  put = new Put(ROW_3);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  puts.add(put);
  put = new Put(ROW_4);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  puts.add(put);
  remoteTable.put(puts);
  ResultScanner scanner = remoteTable.getScanner(new Scan());
  Iterator<Result> iterator = scanner.iterator();
  assertTrue(iterator.hasNext());
  int counter = 0;
  while (iterator.hasNext()) {
    iterator.next();
    counter++;
  }
  assertEquals(4, counter);
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Single and batched puts followed by gets verifying the stored values,
 * plus a table-name sanity check.
 */
@Test
public void testPut() throws IOException {
  Put put = new Put(ROW_3);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  remoteTable.put(put);
  Get get = new Get(ROW_3);
  get.addFamily(COLUMN_1);
  Result result = remoteTable.get(get);
  byte[] value = result.getValue(COLUMN_1, QUALIFIER_1);
  assertNotNull(value);
  assertTrue(Bytes.equals(VALUE_1, value));
  // Multiput: raw List in the original; use the parameterized type.
  List<Put> puts = new ArrayList<Put>();
  put = new Put(ROW_3);
  put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
  puts.add(put);
  put = new Put(ROW_4);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  puts.add(put);
  put = new Put(ROW_4);
  put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
  puts.add(put);
  remoteTable.put(puts);
  get = new Get(ROW_3);
  get.addFamily(COLUMN_2);
  result = remoteTable.get(get);
  value = result.getValue(COLUMN_2, QUALIFIER_2);
  assertNotNull(value);
  assertTrue(Bytes.equals(VALUE_2, value));
  get = new Get(ROW_4);
  result = remoteTable.get(get);
  value = result.getValue(COLUMN_1, QUALIFIER_1);
  assertNotNull(value);
  assertTrue(Bytes.equals(VALUE_1, value));
  value = result.getValue(COLUMN_2, QUALIFIER_2);
  assertNotNull(value);
  assertTrue(Bytes.equals(VALUE_2, value));
  assertTrue(Bytes.equals(Bytes.toBytes("TestRemoteTable"), remoteTable.getTableName()));
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises RemoteHTable.Scanner: batched next(n), single next(), scanner
 * exhaustion (null), and family/qualifier-restricted scanners.
 */
@Test
public void testScanner() throws IOException {
  // Raw List in the original; use the parameterized type.
  List<Put> puts = new ArrayList<Put>();
  Put put = new Put(ROW_1);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  puts.add(put);
  put = new Put(ROW_2);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  puts.add(put);
  put = new Put(ROW_3);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  puts.add(put);
  put = new Put(ROW_4);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  puts.add(put);
  remoteTable.put(puts);
  // Full scan: 1 + 1 + 2 rows, then exhaustion returns null.
  ResultScanner scanner = remoteTable.getScanner(new Scan());
  Result[] results = scanner.next(1);
  assertNotNull(results);
  assertEquals(1, results.length);
  assertTrue(Bytes.equals(ROW_1, results[0].getRow()));
  Result result = scanner.next();
  assertNotNull(result);
  assertTrue(Bytes.equals(ROW_2, result.getRow()));
  results = scanner.next(2);
  assertNotNull(results);
  assertEquals(2, results.length);
  assertTrue(Bytes.equals(ROW_3, results[0].getRow()));
  assertTrue(Bytes.equals(ROW_4, results[1].getRow()));
  results = scanner.next(1);
  assertNull(results);
  scanner.close();
  // Family-restricted scanner sees all four rows.
  scanner = remoteTable.getScanner(COLUMN_1);
  results = scanner.next(4);
  assertNotNull(results);
  assertEquals(4, results.length);
  assertTrue(Bytes.equals(ROW_1, results[0].getRow()));
  assertTrue(Bytes.equals(ROW_2, results[1].getRow()));
  assertTrue(Bytes.equals(ROW_3, results[2].getRow()));
  assertTrue(Bytes.equals(ROW_4, results[3].getRow()));
  scanner.close();
  // Family+qualifier-restricted scanner also sees all four rows.
  scanner = remoteTable.getScanner(COLUMN_1, QUALIFIER_1);
  results = scanner.next(4);
  assertNotNull(results);
  assertEquals(4, results.length);
  assertTrue(Bytes.equals(ROW_1, results[0].getRow()));
  assertTrue(Bytes.equals(ROW_2, results[1].getRow()));
  assertTrue(Bytes.equals(ROW_3, results[2].getRow()));
  assertTrue(Bytes.equals(ROW_4, results[3].getRow()));
  scanner.close();
  assertTrue(remoteTable.isAutoFlush());
}

BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises single-row gets against seeded rows: whole-row, family- and
 * column-restricted gets, timestamp and time-range restricted gets, and a
 * multi-version get.
 *
 * Assumes ROW_1 holds VALUE_1 in COLUMN_1 only, and ROW_2 holds VALUE_2 at
 * TS_2 over an older VALUE_1 at TS_1 — TODO confirm against fixture setup.
 */
@Test public void testGet() throws IOException {
  // Whole-row get of ROW_1: only COLUMN_1 is populated.
  Get get=new Get(ROW_1);
  Result result=remoteTable.get(get);
  byte[] value1=result.getValue(COLUMN_1,QUALIFIER_1);
  byte[] value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1,value1));
  assertNull(value2);
  // Restricting to COLUMN_3 returns nothing.
  get=new Get(ROW_1);
  get.addFamily(COLUMN_3);
  result=remoteTable.get(get);
  value1=result.getValue(COLUMN_1,QUALIFIER_1);
  value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNull(value1);
  assertNull(value2);
  // Explicit columns on ROW_1.
  get=new Get(ROW_1);
  get.addColumn(COLUMN_1,QUALIFIER_1);
  get.addColumn(COLUMN_2,QUALIFIER_2);
  result=remoteTable.get(get);
  value1=result.getValue(COLUMN_1,QUALIFIER_1);
  value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1,value1));
  assertNull(value2);
  // Whole-row get of ROW_2: both families populated with VALUE_2.
  get=new Get(ROW_2);
  result=remoteTable.get(get);
  value1=result.getValue(COLUMN_1,QUALIFIER_1);
  value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_2,value1));
  assertNotNull(value2);
  assertTrue(Bytes.equals(VALUE_2,value2));
  // Family-restricted get on ROW_2.
  get=new Get(ROW_2);
  get.addFamily(COLUMN_1);
  result=remoteTable.get(get);
  value1=result.getValue(COLUMN_1,QUALIFIER_1);
  value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_2,value1));
  assertNull(value2);
  // Column-restricted get on ROW_2.
  get=new Get(ROW_2);
  get.addColumn(COLUMN_1,QUALIFIER_1);
  get.addColumn(COLUMN_2,QUALIFIER_2);
  result=remoteTable.get(get);
  value1=result.getValue(COLUMN_1,QUALIFIER_1);
  value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_2,value1));
  assertNotNull(value2);
  assertTrue(Bytes.equals(VALUE_2,value2));
  // Exact timestamp TS_1: the older VALUE_1 cell is returned.
  get=new Get(ROW_2);
  get.addFamily(COLUMN_1);
  get.addFamily(COLUMN_2);
  get.setTimeStamp(TS_1);
  result=remoteTable.get(get);
  value1=result.getValue(COLUMN_1,QUALIFIER_1);
  value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1,value1));
  assertNull(value2);
  // Time range [0, TS_1]: same older cell.
  get=new Get(ROW_2);
  get.addFamily(COLUMN_1);
  get.addFamily(COLUMN_2);
  get.setTimeRange(0,TS_1 + 1);
  result=remoteTable.get(get);
  value1=result.getValue(COLUMN_1,QUALIFIER_1);
  value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1,value1));
  assertNull(value2);
  // Two versions requested: expect exactly one cell at TS_1 (VALUE_1) and
  // one at TS_2 (VALUE_2) within COLUMN_1.
  get=new Get(ROW_2);
  get.addFamily(COLUMN_1);
  get.setMaxVersions(2);
  result=remoteTable.get(get);
  int count=0;
  for ( Cell kv : result.listCells()) {
    if (CellUtil.matchingFamily(kv,COLUMN_1) && TS_1 == kv.getTimestamp()) {
      assertTrue(CellUtil.matchingValue(kv,VALUE_1));
      count++;
    }
    if (CellUtil.matchingFamily(kv,COLUMN_1) && TS_2 == kv.getTimestamp()) {
      assertTrue(CellUtil.matchingValue(kv,VALUE_2));
      count++;
    }
  }
  assertEquals(2,count);
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Batch gets: verifies result counts and sizes for existing rows, a
 * versioned get, and that gets for missing rows are dropped from the
 * returned array.
 *
 * @throws Exception if a remote operation fails
 */
@Test
public void testMultiGet() throws Exception {
  // Typed list instead of the raw ArrayList the original declared.
  ArrayList<Get> gets = new ArrayList<>();
  gets.add(new Get(ROW_1));
  gets.add(new Get(ROW_2));
  Result[] results = remoteTable.get(gets);
  assertNotNull(results);
  assertEquals(2, results.length);
  assertEquals(1, results[0].size());
  assertEquals(2, results[1].size());
  // Re-issue with up to 3 versions requested for the first get.
  gets = new ArrayList<>();
  Get g = new Get(ROW_1);
  g.setMaxVersions(3);
  gets.add(g);
  gets.add(new Get(ROW_2));
  results = remoteTable.get(gets);
  assertNotNull(results);
  assertEquals(2, results.length);
  assertEquals(1, results[0].size());
  assertEquals(3, results[1].size());
  // A get for a missing row yields no result at all.
  gets = new ArrayList<>();
  gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
  results = remoteTable.get(gets);
  assertNotNull(results);
  assertEquals(0, results.length);
  // Mixed missing + present rows: only present rows come back.
  gets = new ArrayList<>();
  gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
  gets.add(new Get(ROW_1));
  gets.add(new Get(ROW_2));
  results = remoteTable.get(gets);
  assertNotNull(results);
  assertEquals(2, results.length);
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Writes one row with two cells, then deletes progressively: a single
 * column, a whole-row delete at timestamp 1 (a no-op for newer cells), and
 * finally the whole row — verifying visibility after each step.
 */
@Test public void testDelete() throws IOException {
  // Seed ROW_3 with one cell in each family.
  Put put=new Put(ROW_3);
  put.addColumn(COLUMN_1,QUALIFIER_1,VALUE_1);
  put.addColumn(COLUMN_2,QUALIFIER_2,VALUE_2);
  remoteTable.put(put);
  Get get=new Get(ROW_3);
  get.addFamily(COLUMN_1);
  get.addFamily(COLUMN_2);
  Result result=remoteTable.get(get);
  byte[] value1=result.getValue(COLUMN_1,QUALIFIER_1);
  byte[] value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1,value1));
  assertNotNull(value2);
  assertTrue(Bytes.equals(VALUE_2,value2));
  // Delete only COLUMN_2:QUALIFIER_2; COLUMN_1 must survive.
  Delete delete=new Delete(ROW_3);
  delete.addColumn(COLUMN_2,QUALIFIER_2);
  remoteTable.delete(delete);
  get=new Get(ROW_3);
  get.addFamily(COLUMN_1);
  get.addFamily(COLUMN_2);
  result=remoteTable.get(get);
  value1=result.getValue(COLUMN_1,QUALIFIER_1);
  value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1,value1));
  assertNull(value2);
  // A row delete bounded at timestamp 1 targets nothing newer, so
  // COLUMN_1 still remains.
  delete=new Delete(ROW_3);
  delete.setTimestamp(1L);
  remoteTable.delete(delete);
  get=new Get(ROW_3);
  get.addFamily(COLUMN_1);
  get.addFamily(COLUMN_2);
  result=remoteTable.get(get);
  value1=result.getValue(COLUMN_1,QUALIFIER_1);
  value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1,value1));
  assertNull(value2);
  // Unqualified row delete removes everything.
  delete=new Delete(ROW_3);
  remoteTable.delete(delete);
  get=new Get(ROW_3);
  get.addFamily(COLUMN_1);
  get.addFamily(COLUMN_2);
  result=remoteTable.get(get);
  value1=result.getValue(COLUMN_1,QUALIFIER_1);
  value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNull(value1);
  assertNull(value2);
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises exists/existsAll plus the conditional mutations: checkAndDelete
 * succeeds when the expected value matches, and checkAndPut fails once the
 * expectation differs from the stored value.
 */
@Test public void testCheckAndDelete() throws IOException {
  Get get=new Get(ROW_1);
  Result result=remoteTable.get(get);
  byte[] value1=result.getValue(COLUMN_1,QUALIFIER_1);
  byte[] value2=result.getValue(COLUMN_2,QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1,value1));
  assertNull(value2);
  assertTrue(remoteTable.exists(get));
  assertEquals(1,remoteTable.existsAll(Collections.singletonList(get)).length);
  // Conditional delete with the matching expected value removes the row.
  Delete delete=new Delete(ROW_1);
  remoteTable.checkAndDelete(ROW_1,COLUMN_1,QUALIFIER_1,VALUE_1,delete);
  assertFalse(remoteTable.exists(get));
  // Re-create the row, then verify checkAndPut only succeeds while the
  // expected value matches what is stored.
  Put put=new Put(ROW_1);
  put.addColumn(COLUMN_1,QUALIFIER_1,VALUE_1);
  remoteTable.put(put);
  assertTrue(remoteTable.checkAndPut(ROW_1,COLUMN_1,QUALIFIER_1,VALUE_1,put));
  assertFalse(remoteTable.checkAndPut(ROW_1,COLUMN_1,QUALIFIER_1,VALUE_2,put));
}

Class: org.apache.hadoop.hbase.security.AbstractTestSecureIPC

InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Confirms the Kerberos-logged-in UGI is the process-wide current user and
 * then exercises an RPC call as that user.
 */
@Test
public void testRpcCallWithEnabledKerberosSaslAuth() throws Exception {
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  // The test login must be the process-wide current user.
  assertSame(ugi, currentUser);
  // And it must be a Kerberos login under the expected principal.
  assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
  assertEquals(krbPrincipal, ugi.getUserName());
  callRpcService(User.create(currentUser));
}

APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Configures a SIMPLE-auth client against a Kerberos-secured server that
 * permits insecure fallback, and verifies the RPC call succeeds.
 */
@Test
public void testRpcFallbackToSimpleAuth() throws Exception {
  String simpleUserName = "testuser";
  UserGroupInformation simpleUgi =
      UserGroupInformation.createUserForTesting(simpleUserName, new String[]{simpleUserName});
  // The client UGI is distinct from the Kerberos login and uses SIMPLE auth.
  assertNotSame(ugi, simpleUgi);
  assertEquals(AuthenticationMethod.SIMPLE, simpleUgi.getAuthenticationMethod());
  assertEquals(simpleUserName, simpleUgi.getUserName());
  // Client asks for simple auth; server explicitly allows the fallback.
  clientConf.set(User.HBASE_SECURITY_CONF_KEY, "simple");
  serverConf.setBoolean(RpcServer.FALLBACK_TO_INSECURE_CLIENT_AUTH, true);
  callRpcService(User.create(simpleUgi));
}

Class: org.apache.hadoop.hbase.security.TestEncryptionUtil

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Round-trips a random data key through wrapKey/unwrapKey and checks that
 * unwrapping under the wrong subject fails with a KeyException.
 */
@Test
public void testKeyWrapping() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  // Build a random key under the configured cipher algorithm.
  byte[] rawKey = new byte[AES.KEY_LENGTH];
  new SecureRandom().nextBytes(rawKey);
  String cipherAlg = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  Key dataKey = new SecretKeySpec(rawKey, cipherAlg);
  // Wrap for subject "hbase", then unwrap and compare key material.
  byte[] wrapped = EncryptionUtil.wrapKey(conf, "hbase", dataKey);
  assertNotNull(wrapped);
  Key unwrapped = EncryptionUtil.unwrapKey(conf, "hbase", wrapped);
  assertNotNull(unwrapped);
  assertTrue(unwrapped instanceof SecretKeySpec);
  assertTrue("Unwrapped key bytes do not match original",
      Bytes.equals(rawKey, unwrapped.getEncoded()));
  // Unwrapping under a different subject must fail.
  try {
    EncryptionUtil.unwrapKey(conf, "other", wrapped);
    fail("Unwrap with incorrect key did not throw KeyException");
  } catch (KeyException e) {
    // expected
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier ExceptionVerifier HybridVerifier 
/**
 * A WAL key wrapped for subject "hbase" must fail with KeyException when
 * unwrapped under a different subject.
 */
@Test(expected = KeyException.class)
public void testWALKeyWrappingWithIncorrectKey() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  byte[] rawKey = new byte[AES.KEY_LENGTH];
  new SecureRandom().nextBytes(rawKey);
  String walAlg = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  Key walKey = new SecretKeySpec(rawKey, walAlg);
  byte[] wrapped = EncryptionUtil.wrapKey(conf, "hbase", walKey);
  assertNotNull(wrapped);
  // Wrong subject: this call is expected to throw.
  EncryptionUtil.unwrapWALKey(conf, "other", wrapped);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Round-trips a random WAL key through wrapKey/unwrapWALKey and verifies the
 * recovered key material matches the original bytes.
 */
@Test
public void testWALKeyWrapping() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  byte[] rawKey = new byte[AES.KEY_LENGTH];
  new SecureRandom().nextBytes(rawKey);
  String walAlg = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  Key walKey = new SecretKeySpec(rawKey, walAlg);
  byte[] wrapped = EncryptionUtil.wrapKey(conf, "hbase", walKey);
  assertNotNull(wrapped);
  Key recovered = EncryptionUtil.unwrapWALKey(conf, "hbase", wrapped);
  assertNotNull(recovered);
  assertTrue(recovered instanceof SecretKeySpec);
  assertTrue("Unwrapped key bytes do not match original",
      Bytes.equals(rawKey, recovered.getEncoded()));
}

Class: org.apache.hadoop.hbase.security.TestHBaseSaslRpcClient

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * For every QualityOfProtection value, constructs an HBaseSaslRpcClient with
 * that QOP and verifies the SASL property it configured matches the QOP's
 * SASL name.
 *
 * @throws Exception if the SASL client cannot be constructed
 */
@Test
public void testSaslClientUsesGivenRpcProtection() throws Exception {
  Token token = createTokenMockWithCredentials(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD);
  for (SaslUtil.QualityOfProtection qop : SaslUtil.QualityOfProtection.values()) {
    // Anonymous subclass exposes the protected saslProps for inspection.
    String negotiatedQop = new HBaseSaslRpcClient(AuthMethod.DIGEST, token,
        "principal/host@DOMAIN.COM", false, qop.name()) {
      public String getQop() {
        return saslProps.get(Sasl.QOP);
      }
    }.getQop();
    // JUnit convention: expected value first, actual second (was reversed).
    assertEquals(qop.getSaslQop(), negotiatedQop);
  }
}

Class: org.apache.hadoop.hbase.security.TestUser

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * A user created via createUserForTesting and the same user obtained through
 * a UserProvider must resolve to identical group lists (shared group cache).
 */
@Test
public void testCreateUserForTestingGroupCache() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  User created = User.createUserForTesting(conf, "group_user", new String[]{"MYGROUP"});
  UserProvider provider = UserProvider.instantiate(conf);
  User provided = provider.create(UserGroupInformation.createRemoteUser("group_user"));
  assertArrayEquals(created.getGroupNames(), provided.getGroupNames());
}

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * On non-Windows hosts the "root" user exists, so two independently created
 * UGIs for root must agree on a non-empty group list.
 */
@Test
public void testCacheGetGroupsRoot() throws Exception {
  // "root" is only guaranteed to exist on non-Windows systems.
  if (SystemUtils.IS_OS_WINDOWS) {
    return;
  }
  Configuration conf = HBaseConfiguration.create();
  UserProvider provider = UserProvider.instantiate(conf);
  String rootUserName = "root";
  UserGroupInformation firstUgi = UserGroupInformation.createRemoteUser(rootUserName);
  UserGroupInformation secondUgi = UserGroupInformation.createRemoteUser(rootUserName);
  User firstUser = provider.create(firstUgi);
  User secondUser = provider.create(secondUgi);
  // Both lookups resolve the same groups, and root belongs to at least one.
  assertArrayEquals(firstUser.getGroupNames(), secondUser.getGroupNames());
  String[] groupNames = firstUgi.getGroupNames();
  assertTrue(groupNames.length > 0);
}

IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Make sure we always return a usable, stable result for the current user.
 * Previously getCurrent() returned null if not initialized on non-secure
 * Hadoop variants.
 */
@Test
public void testGetCurrent() throws Exception {
  User first = User.getCurrent();
  assertNotNull(first.ugi);
  LOG.debug("User1 is " + first.getName());
  // Repeated lookups must be non-null and equal to the first result.
  for (int iteration = 0; iteration < 100; iteration++) {
    User current = User.getCurrent();
    assertNotNull(current);
    assertEquals(first.getName(), current.getName());
    assertEquals(first, current);
    assertEquals(first.hashCode(), current.hashCode());
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies User.runAs() establishes the expected current user: for one user,
 * for a second user with the same action, for nested runAs calls, and for a
 * non-throwing PrivilegedAction.
 *
 * @throws Exception if a privileged action fails
 */
@Test
public void testRunAs() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  final User user = User.createUserForTesting(conf, "testuser", new String[]{"foo"});
  // Typed action (a raw PrivilegedExceptionAction cannot return String).
  final PrivilegedExceptionAction<String> action = new PrivilegedExceptionAction<String>() {
    @Override
    public String run() throws IOException {
      User u = User.getCurrent();
      return u.getName();
    }
  };
  String username = user.runAs(action);
  assertEquals("Current user within runAs() should match", "testuser", username);
  // The same action run as a different user must see that user.
  User user2 = User.createUserForTesting(conf, "testuser2", new String[]{"foo"});
  String username2 = user2.runAs(action);
  assertEquals("Second username should match second user", "testuser2", username2);
  username = user.runAs(new PrivilegedExceptionAction<String>() {
    @Override
    public String run() throws Exception {
      return User.getCurrent().getName();
    }
  });
  assertEquals("User name in runAs() should match", "testuser", username);
  // Nested runAs: the inner action runs as the inner user, while the outer
  // user remains "current" inside the outer action.
  user2.runAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws IOException, InterruptedException {
      String nestedName = user.runAs(action);
      assertEquals("Nest name should match nested user", "testuser", nestedName);
      assertEquals("Current name should match current user", "testuser2",
          User.getCurrent().getName());
      return null;
    }
  });
  // Non-throwing variant: an IOException maps to the sentinel "empty".
  username = user.runAs(new PrivilegedAction<String>() {
    String result = null;
    @Override
    public String run() {
      try {
        return User.getCurrent().getName();
      } catch (IOException e) {
        result = "empty";
      }
      return result;
    }
  });
  assertEquals("Current user within runAs() should match", "testuser", username);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Two UGIs for the same unknown user must share a single cached (and empty)
 * group list.
 */
@Test
public void testCacheGetGroups() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  UserProvider provider = UserProvider.instantiate(conf);
  String nonUser = "kklvfnvhdhcenfnniilggljhdecjhidkle";
  UserGroupInformation firstUgi = UserGroupInformation.createRemoteUser(nonUser);
  UserGroupInformation secondUgi = UserGroupInformation.createRemoteUser(nonUser);
  User firstUser = provider.create(firstUgi);
  User secondUser = provider.create(secondUgi);
  assertArrayEquals(firstUser.getGroupNames(), secondUser.getGroupNames());
  // Intentional reference comparison: both users must share the cached array.
  assertTrue(firstUser.getGroupNames() == secondUser.getGroupNames());
  // A user unknown to the OS has no groups.
  assertEquals(0, firstUgi.getGroupNames().length);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * For a simple (no-realm) test user, the full name and the short name are
 * the same string.
 */
@Test
public void testBasicAttributes() throws Exception {
  Configuration testConf = HBaseConfiguration.create();
  User simpleUser = User.createUserForTesting(testConf, "simple", new String[]{"foo"});
  assertEquals("Username should match", "simple", simpleUser.getName());
  assertEquals("Short username should match", "simple", simpleUser.getShortName());
}

InternalCallVerifier BooleanVerifier 
/**
 * isSecurityEnabled / isHBaseSecurityEnabled depend on the combination of
 * Hadoop- and HBase-level security settings; only the HBase key matters for
 * isHBaseSecurityEnabled.
 */
@Test
public void testSecurityForNonSecureHadoop() {
  assertFalse("Security should be disable in non-secure Hadoop", User.isSecurityEnabled());
  // Both Hadoop and HBase set to kerberos -> HBase security is on.
  Configuration bothSecure = HBaseConfiguration.create();
  bothSecure.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  bothSecure.set(User.HBASE_SECURITY_CONF_KEY, "kerberos");
  assertTrue("Security should be enabled", User.isHBaseSecurityEnabled(bothSecure));
  // Hadoop alone secure -> HBase security stays off.
  Configuration hadoopOnly = HBaseConfiguration.create();
  hadoopOnly.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  assertFalse("HBase security should not be enabled if " + User.HBASE_SECURITY_CONF_KEY
      + " is not set accordingly", User.isHBaseSecurityEnabled(hadoopOnly));
  // HBase alone secure -> on regardless of the HDFS setting.
  Configuration hbaseOnly = HBaseConfiguration.create();
  hbaseOnly.set(User.HBASE_SECURITY_CONF_KEY, "kerberos");
  assertTrue("HBase security should be enabled regardless of underlying "
      + "HDFS settings", User.isHBaseSecurityEnabled(hbaseOnly));
}

Class: org.apache.hadoop.hbase.security.TestUsersOperationsWithSecureHadoop

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Tests User.login() with a security-enabled configuration.
 *
 * To run this test, the following system properties must be specified:
 * <ul>
 *   <li>hbase.regionserver.kerberos.principal</li>
 *   <li>hbase.regionserver.keytab.file</li>
 * </ul>
 *
 * A login attempt without a principal configured must leave the login user
 * unchanged; a login with the secured configuration must replace it.
 *
 * @throws Exception on login failure
 */
@Test public void testUserLoginInSecureHadoop() throws Exception {
  UserGroupInformation defaultLogin=UserGroupInformation.getLoginUser();
  // Without a principal configured, login is a no-op: the login user stays.
  Configuration conf=getConfigurationWoPrincipal();
  User.login(conf,HBaseKerberosUtils.KRB_KEYTAB_FILE,HBaseKerberosUtils.KRB_PRINCIPAL,"localhost");
  UserGroupInformation failLogin=UserGroupInformation.getLoginUser();
  assertTrue("ugi should be the same in case fail login",defaultLogin.equals(failLogin));
  // Sanity-check the externally supplied keytab/principal properties.
  String nnKeyTab=getKeytabFileForTesting();
  String dnPrincipal=getPrincipalForTesting();
  assertNotNull("KerberosKeytab was not specified",nnKeyTab);
  assertNotNull("KerberosPrincipal was not specified",dnPrincipal);
  // With the secured configuration, login must switch the login user.
  conf=getSecuredConfiguration();
  UserGroupInformation.setConfiguration(conf);
  User.login(conf,HBaseKerberosUtils.KRB_KEYTAB_FILE,HBaseKerberosUtils.KRB_PRINCIPAL,"localhost");
  UserGroupInformation successLogin=UserGroupInformation.getLoginUser();
  assertFalse("ugi should be different in in case success login",defaultLogin.equals(successLogin));
}


Class: org.apache.hadoop.hbase.security.access.TestAccessController

InternalCallVerifier BooleanVerifier 
/**
 * Verifies the cluster reports both AUTHORIZATION and CELL_AUTHORIZATION
 * security capabilities with the AccessController coprocessor installed.
 *
 * @throws Exception if the admin call fails
 */
@Test(timeout = 180000)
public void testSecurityCapabilities() throws Exception {
  // Typed list instead of the raw List the original declared.
  List<SecurityCapability> capabilities =
      TEST_UTIL.getConnection().getAdmin().getSecurityCapabilities();
  assertTrue("AUTHORIZATION capability is missing",
      capabilities.contains(SecurityCapability.AUTHORIZATION));
  assertTrue("CELL_AUTHORIZATION capability is missing",
      capabilities.contains(SecurityCapability.CELL_AUTHORIZATION));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * The preMerge coprocessor hook must be allowed for admin/owner users and
 * denied for everyone else.
 */
@Test(timeout=180000) public void testMergeRegions() throws Exception {
  final TableName tname=TableName.valueOf("testMergeRegions");
  createTestTable(tname);
  try {
    // Need at least two regions to attempt a merge.
    final List regions=TEST_UTIL.getHBaseCluster().findRegionsForTable(tname);
    assertTrue("not enough regions: " + regions.size(),regions.size() >= 2);
    AccessTestAction action=new AccessTestAction(){
      @Override public Object run() throws Exception {
        // Invoke the hook directly against the first two regions.
        ACCESS_CONTROLLER.preMerge(ObserverContext.createAndPrepare(RSCP_ENV,null),regions.get(0),regions.get(1));
        return null;
      }
    };
    verifyAllowed(action,SUPERUSER,USER_ADMIN,USER_OWNER,USER_GROUP_ADMIN);
    verifyDenied(action,USER_CREATE,USER_RW,USER_RO,USER_NONE,USER_GROUP_READ,USER_GROUP_WRITE,USER_GROUP_CREATE);
  } finally {
    deleteTable(TEST_UTIL,tname);
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Granting ADMIN on a table must change the grantee's permission listing for
 * that table from empty to two entries — presumably owner + grantee; TODO
 * confirm against the grant bookkeeping.
 */
@Test(timeout=180000) public void testAccessControlClientUserPerms() throws Exception {
  TableName tname=TableName.valueOf("testAccessControlClientUserPerms");
  createTestTable(tname);
  try {
    final String regex=tname.getNameWithNamespaceInclAsString();
    User testUserPerms=User.createUserForTesting(conf,"testUserPerms",new String[0]);
    // Before any grant, the user sees no permissions for the table.
    assertEquals(0,testUserPerms.runAs(getPrivilegedAction(regex)).size());
    grantOnTable(TEST_UTIL,testUserPerms.getShortName(),tname,null,null,Action.ADMIN);
    List perms=testUserPerms.runAs(getPrivilegedAction(regex));
    assertNotNull(perms);
    assertEquals(2,perms.size());
  } finally {
    deleteTable(TEST_UTIL,tname);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Fetches global permissions through the AccessControl coprocessor endpoint
 * and checks the acl table's permission count and that the admin/super-user
 * entries are present.
 */
@Test(timeout=180000) public void testGlobalPermissionList() throws Exception {
  List perms;
  // Fetch permissions via the coprocessor endpoint on hbase:acl.
  Table acl=systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME);
  try {
    BlockingRpcChannel service=acl.coprocessorService(HConstants.EMPTY_START_ROW);
    AccessControlService.BlockingInterface protocol=AccessControlService.newBlockingStub(service);
    perms=ProtobufUtil.getUserPermissions(protocol);
  } finally {
    acl.close();
  }
  // Expected entries: the admin user plus one all-actions entry per
  // configured super user.
  List adminPerms=new ArrayList();
  adminPerms.add(new UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()),AccessControlLists.ACL_TABLE_NAME,null,null,Bytes.toBytes("ACRW")));
  List superUsers=Superusers.getSuperUsers();
  for ( String user : superUsers) {
    adminPerms.add(new UserPermission(Bytes.toBytes(user),AccessControlLists.ACL_TABLE_NAME,null,null,Action.values()));
  }
  // The constant 5 presumably matches the number of global users created in
  // the test setup — TODO confirm against the fixture.
  assertTrue("Only super users, global users and user admin has permission on table hbase:acl " + "per setup",perms.size() == 5 + superUsers.size() && hasFoundUserPermission(adminPerms,perms));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Walks the table permission lifecycle: owner permissions on create, a READ
 * grant to a user, an upgrade to WRITE+READ, a revoke, and an owner change
 * via modifyTable — checking the coprocessor-reported permission list after
 * each step. (The endpoint fetch is repeated inline after every mutation.)
 */
@Test(timeout=180000) public void testPermissionList() throws Exception {
  final TableName tableName=TableName.valueOf("testPermissionList");
  final byte[] family1=Bytes.toBytes("f1");
  final byte[] family2=Bytes.toBytes("f2");
  final byte[] qualifier=Bytes.toBytes("q");
  Admin admin=TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    deleteTable(TEST_UTIL,tableName);
  }
  HTableDescriptor htd=new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(family1));
  htd.addFamily(new HColumnDescriptor(family2));
  htd.setOwner(USER_OWNER);
  createTable(TEST_UTIL,htd);
  try {
    List perms;
    // Initial fetch: the owner must hold all actions.
    Table acl=systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME);
    try {
      BlockingRpcChannel service=acl.coprocessorService(tableName.getName());
      AccessControlService.BlockingInterface protocol=AccessControlService.newBlockingStub(service);
      perms=ProtobufUtil.getUserPermissions(protocol,tableName);
    } finally {
      acl.close();
    }
    UserPermission ownerperm=new UserPermission(Bytes.toBytes(USER_OWNER.getName()),tableName,null,Action.values());
    assertTrue("Owner should have all permissions on table",hasFoundUserPermission(ownerperm,perms));
    User user=User.createUserForTesting(TEST_UTIL.getConfiguration(),"user",new String[0]);
    byte[] userName=Bytes.toBytes(user.getShortName());
    UserPermission up=new UserPermission(userName,tableName,family1,qualifier,Permission.Action.READ);
    assertFalse("User should not be granted permission: " + up.toString(),hasFoundUserPermission(up,perms));
    // Grant READ on f1:q and re-fetch.
    grantOnTable(TEST_UTIL,user.getShortName(),tableName,family1,qualifier,Permission.Action.READ);
    acl=systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME);
    try {
      BlockingRpcChannel service=acl.coprocessorService(tableName.getName());
      AccessControlService.BlockingInterface protocol=AccessControlService.newBlockingStub(service);
      perms=ProtobufUtil.getUserPermissions(protocol,tableName);
    } finally {
      acl.close();
    }
    UserPermission upToVerify=new UserPermission(userName,tableName,family1,qualifier,Permission.Action.READ);
    assertTrue("User should be granted permission: " + upToVerify.toString(),hasFoundUserPermission(upToVerify,perms));
    upToVerify=new UserPermission(userName,tableName,family1,qualifier,Permission.Action.WRITE);
    assertFalse("User should not be granted permission: " + upToVerify.toString(),hasFoundUserPermission(upToVerify,perms));
    // Upgrade the grant to WRITE+READ and re-fetch.
    grantOnTable(TEST_UTIL,user.getShortName(),tableName,family1,qualifier,Permission.Action.WRITE,Permission.Action.READ);
    acl=systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME);
    try {
      BlockingRpcChannel service=acl.coprocessorService(tableName.getName());
      AccessControlService.BlockingInterface protocol=AccessControlService.newBlockingStub(service);
      perms=ProtobufUtil.getUserPermissions(protocol,tableName);
    } finally {
      acl.close();
    }
    upToVerify=new UserPermission(userName,tableName,family1,qualifier,Permission.Action.WRITE,Permission.Action.READ);
    assertTrue("User should be granted permission: " + upToVerify.toString(),hasFoundUserPermission(upToVerify,perms));
    // Revoke the grant and confirm the permission disappears.
    revokeFromTable(TEST_UTIL,user.getShortName(),tableName,family1,qualifier,Permission.Action.WRITE,Permission.Action.READ);
    acl=systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME);
    try {
      BlockingRpcChannel service=acl.coprocessorService(tableName.getName());
      AccessControlService.BlockingInterface protocol=AccessControlService.newBlockingStub(service);
      perms=ProtobufUtil.getUserPermissions(protocol,tableName);
    } finally {
      acl.close();
    }
    assertFalse("User should not be granted permission: " + upToVerify.toString(),hasFoundUserPermission(upToVerify,perms));
    // Change the table owner and confirm the new owner gets full permissions.
    admin.disableTable(tableName);
    User newOwner=User.createUserForTesting(conf,"new_owner",new String[]{});
    htd.setOwner(newOwner);
    admin.modifyTable(tableName,htd);
    acl=systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME);
    try {
      BlockingRpcChannel service=acl.coprocessorService(tableName.getName());
      AccessControlService.BlockingInterface protocol=AccessControlService.newBlockingStub(service);
      perms=ProtobufUtil.getUserPermissions(protocol,tableName);
    } finally {
      acl.close();
    }
    UserPermission newOwnerperm=new UserPermission(Bytes.toBytes(newOwner.getName()),tableName,null,Action.values());
    assertTrue("New owner should have all permissions on table",hasFoundUserPermission(newOwnerperm,perms));
  } finally {
    deleteTable(TEST_UTIL,tableName);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * getUserPermissions with a regex/table-name input: verifies matching across
 * the default namespace, an explicit namespace, the acl table and a
 * non-matching pattern, before and after ADMIN grants on two same-named
 * tables in different namespaces.
 */
@Test(timeout=180000) public void testAccessControllerUserPermsRegexHandling() throws Exception {
  User testRegexHandler=User.createUserForTesting(conf,"testRegexHandling",new String[0]);
  final String REGEX_ALL_TABLES=".*";
  final String tableName="testRegex";
  final TableName table1=TableName.valueOf(tableName);
  final byte[] family=Bytes.toBytes("f1");
  // Create the same table name in the default namespace and in a
  // dedicated namespace.
  Admin admin=TEST_UTIL.getHBaseAdmin();
  HTableDescriptor htd=new HTableDescriptor(table1);
  htd.addFamily(new HColumnDescriptor(family));
  createTable(TEST_UTIL,htd);
  String ns="testNamespace";
  NamespaceDescriptor desc=NamespaceDescriptor.create(ns).build();
  final TableName table2=TableName.valueOf(ns,tableName);
  createNamespace(TEST_UTIL,desc);
  htd=new HTableDescriptor(table2);
  htd.addFamily(new HColumnDescriptor(family));
  createTable(TEST_UTIL,htd);
  // The super user sees the acl table's entries; the new user sees none.
  String aclTableName=AccessControlLists.ACL_TABLE_NAME.getNameAsString();
  assertEquals(5,SUPERUSER.runAs(getPrivilegedAction(aclTableName)).size());
  assertEquals(0,testRegexHandler.runAs(getPrivilegedAction(aclTableName)).size());
  assertEquals(0,testRegexHandler.runAs(getPrivilegedAction(REGEX_ALL_TABLES)).size());
  // Each ADMIN grant makes two entries visible per table — presumably
  // owner + grantee; TODO confirm.
  grantOnTable(TEST_UTIL,testRegexHandler.getShortName(),table1,null,null,Action.ADMIN);
  assertEquals(2,testRegexHandler.runAs(getPrivilegedAction(REGEX_ALL_TABLES)).size());
  grantOnTable(TEST_UTIL,testRegexHandler.getShortName(),table2,null,null,Action.ADMIN);
  assertEquals(4,testRegexHandler.runAs(getPrivilegedAction(REGEX_ALL_TABLES)).size());
  // A bare table name matches only the default namespace...
  assertEquals(2,testRegexHandler.runAs(getPrivilegedAction(tableName)).size());
  assertEquals(2,testRegexHandler.runAs(getPrivilegedAction(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + TableName.NAMESPACE_DELIM + tableName)).size());
  // ...while a namespace-qualified name matches only that namespace.
  assertEquals(2,testRegexHandler.runAs(getPrivilegedAction(ns + TableName.NAMESPACE_DELIM + tableName)).size());
  assertEquals(0,testRegexHandler.runAs(getPrivilegedAction("notMatchingAny")).size());
  deleteTable(TEST_UTIL,table1);
  deleteTable(TEST_UTIL,table2);
  deleteNamespace(TEST_UTIL,ns);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies table permissions survive a truncate: the ACL entries present
 * before the truncation are restored afterwards.
 *
 * @throws Exception wrapped in HBaseIOException on any failure
 */
@Test(timeout = 180000)
public void testTruncatePerms() throws Exception {
  try {
    List<UserPermission> existingPerms = AccessControlClient.getUserPermissions(
        systemUserConnection, TEST_TABLE.getNameAsString());
    // assertNotNull is clearer and reports better than assertTrue(x != null).
    assertNotNull(existingPerms);
    assertTrue(existingPerms.size() > 1);
    TEST_UTIL.getHBaseAdmin().disableTable(TEST_TABLE);
    TEST_UTIL.truncateTable(TEST_TABLE);
    TEST_UTIL.waitTableAvailable(TEST_TABLE);
    List<UserPermission> perms = AccessControlClient.getUserPermissions(
        systemUserConnection, TEST_TABLE.getNameAsString());
    assertNotNull(perms);
    assertEquals(existingPerms.size(), perms.size());
  } catch (Throwable e) {
    // Preserve the original failure as the cause.
    throw new HBaseIOException(e);
  }
}

Class: org.apache.hadoop.hbase.security.access.TestAccessController2

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * A user with global CREATE may create a table, and as its owner then holds
 * all five actions (RWXCA) on it.
 */
@Test(timeout=180000) public void testCreateWithCorrectOwner() throws Exception {
  final User testUser=User.createUserForTesting(TEST_UTIL.getConfiguration(),"TestUser",new String[0]);
  SecureTestUtil.grantGlobal(TEST_UTIL,testUser.getShortName(),Action.CREATE);
  // Create the table over a connection owned by testUser so ownership is
  // attributed to that user.
  verifyAllowed(new AccessTestAction(){
    @Override public Object run() throws Exception {
      HTableDescriptor desc=new HTableDescriptor(TEST_TABLE.getTableName());
      desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
      try (Connection connection=ConnectionFactory.createConnection(TEST_UTIL.getConfiguration(),testUser)){
        try (Admin admin=connection.getAdmin()){
          createTable(TEST_UTIL,admin,desc);
        }
      }
      return null;
    }
  },testUser);
  TEST_UTIL.waitTableAvailable(TEST_TABLE.getTableName());
  // The creator must now hold every action on the new table.
  List perms=AccessControlLists.getTablePermissions(conf,TEST_TABLE.getTableName()).get(testUser.getShortName());
  assertNotNull(perms);
  assertFalse(perms.isEmpty());
  assertTrue(perms.get(0).implies(Permission.Action.READ));
  assertTrue(perms.get(0).implies(Permission.Action.WRITE));
  assertTrue(perms.get(0).implies(Permission.Action.EXEC));
  assertTrue(perms.get(0).implies(Permission.Action.CREATE));
  assertTrue(perms.get(0).implies(Permission.Action.ADMIN));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * ACL znodes for a table and its namespace must exist while they do, and be
 * cleaned up when the table and namespace are deleted.
 */
@Test(timeout=180000) public void testACLZNodeDeletion() throws Exception {
  String baseAclZNode="/hbase/acl/";
  String ns="testACLZNodeDeletionNamespace";
  NamespaceDescriptor desc=NamespaceDescriptor.create(ns).build();
  createNamespace(TEST_UTIL,desc);
  final TableName table=TableName.valueOf(ns,"testACLZNodeDeletionTable");
  final byte[] family=Bytes.toBytes("f1");
  HTableDescriptor htd=new HTableDescriptor(table);
  htd.addFamily(new HColumnDescriptor(family));
  createTable(TEST_UTIL,htd);
  grantOnNamespace(TEST_UTIL,TESTGROUP1_USER1.getShortName(),ns,Action.ADMIN);
  ZooKeeperWatcher zkw=TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper();
  // ZKUtil.checkExists returns -1 when the znode is absent.
  assertTrue("The acl znode for table should exist",ZKUtil.checkExists(zkw,baseAclZNode + table.getNameAsString()) != -1);
  assertTrue("The acl znode for namespace should exist",ZKUtil.checkExists(zkw,baseAclZNode + convertToNamespace(ns)) != -1);
  // Tear everything down; both znodes must be gone afterwards.
  revokeFromNamespace(TEST_UTIL,TESTGROUP1_USER1.getShortName(),ns,Action.ADMIN);
  deleteTable(TEST_UTIL,table);
  deleteNamespace(TEST_UTIL,ns);
  assertTrue("The acl znode for table should have been deleted",ZKUtil.checkExists(zkw,baseAclZNode + table.getNameAsString()) == -1);
  assertTrue("The acl znode for namespace should have been deleted",ZKUtil.checkExists(zkw,baseAclZNode + convertToNamespace(ns)) == -1);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Verifies scan authorization for a group at three scopes -- table, column
// family and column qualifier -- after each grant and after each revoke.
// TESTGROUP1_USER1 belongs to the granted group; TESTGROUP2_USER1 does not
// and is expected to be denied throughout.
@Test(timeout=300000) public void testPostGrantAndRevokeScanAction() throws Exception {
  // Full-table scan; with table-level READ the user expects 3 rows back.
  AccessTestAction scanTableActionForGroupWithTableLevelAccess=new AccessTestAction(){
    @Override public Void run() throws Exception {
      try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){
        Scan s1=new Scan();
        try (ResultScanner scanner1=table.getScanner(s1)){
          Result[] next1=scanner1.next(5);
          assertTrue("User having table level access should be able to scan all " + "the data in the table.",next1.length == 3);
        }
      }
      return null;
    }
  } ;
  // Full-table scan; with READ on TEST_FAMILY only, 2 rows are expected.
  AccessTestAction scanTableActionForGroupWithFamilyLevelAccess=new AccessTestAction(){
    @Override public Void run() throws Exception {
      try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){
        Scan s1=new Scan();
        try (ResultScanner scanner1=table.getScanner(s1)){
          Result[] next1=scanner1.next(5);
          assertTrue("User having column family level access should be able to scan all " + "the data belonging to that family.",next1.length == 2);
        }
      }
      return null;
    }
  } ;
  // Explicit scan of TEST_FAMILY_2; used to prove access is denied for a
  // family outside the grant (the scanner result is never inspected).
  AccessTestAction scanFamilyActionForGroupWithFamilyLevelAccess=new AccessTestAction(){
    @Override public Void run() throws Exception {
      try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){
        Scan s1=new Scan();
        s1.addFamily(TEST_FAMILY_2);
        try (ResultScanner scanner1=table.getScanner(s1)){
        }
      }
      return null;
    }
  } ;
  // Full-table scan; with READ on a single qualifier, 1 row is expected.
  AccessTestAction scanTableActionForGroupWithQualifierLevelAccess=new AccessTestAction(){
    @Override public Void run() throws Exception {
      try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){
        Scan s1=new Scan();
        try (ResultScanner scanner1=table.getScanner(s1)){
          Result[] next1=scanner1.next(5);
          assertTrue("User having column qualifier level access should be able to scan " + "that column family qualifier data.",next1.length == 1);
        }
      }
      return null;
    }
  } ;
  // Scan of TEST_FAMILY_2; denied when the grant covers TEST_FAMILY:Q1 only.
  AccessTestAction scanFamilyActionForGroupWithQualifierLevelAccess=new AccessTestAction(){
    @Override public Void run() throws Exception {
      try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){
        Scan s1=new Scan();
        s1.addFamily(TEST_FAMILY_2);
        try (ResultScanner scanner1=table.getScanner(s1)){
        }
      }
      return null;
    }
  } ;
  // Scan of TEST_FAMILY:Q2; denied when the grant covers Q1 only.
  AccessTestAction scanQualifierActionForGroupWithQualifierLevelAccess=new AccessTestAction(){
    @Override public Void run() throws Exception {
      try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){
        Scan s1=new Scan();
        s1.addColumn(TEST_FAMILY,Q2);
        try (ResultScanner scanner1=table.getScanner(s1)){
        }
      }
      return null;
    }
  } ;
  // --- table-level grant, check, revoke, re-check ---
  grantOnTable(TEST_UTIL,TESTGROUP_1_NAME,tableName,null,null,Action.READ);
  verifyAllowed(TESTGROUP1_USER1,scanTableActionForGroupWithTableLevelAccess);
  verifyDenied(TESTGROUP2_USER1,scanTableActionForGroupWithTableLevelAccess);
  revokeFromTable(TEST_UTIL,TESTGROUP_1_NAME,tableName,null,null);
  verifyDenied(TESTGROUP1_USER1,scanTableActionForGroupWithTableLevelAccess);
  // --- family-level grant, check, revoke, re-check ---
  grantOnTable(TEST_UTIL,TESTGROUP_1_NAME,tableName,TEST_FAMILY,null,Permission.Action.READ);
  verifyAllowed(TESTGROUP1_USER1,scanTableActionForGroupWithFamilyLevelAccess);
  verifyDenied(TESTGROUP1_USER1,scanFamilyActionForGroupWithFamilyLevelAccess);
  verifyDenied(TESTGROUP2_USER1,scanTableActionForGroupWithFamilyLevelAccess);
  verifyDenied(TESTGROUP2_USER1,scanFamilyActionForGroupWithFamilyLevelAccess);
  revokeFromTable(TEST_UTIL,TESTGROUP_1_NAME,tableName,TEST_FAMILY,null);
  verifyDenied(TESTGROUP1_USER1,scanTableActionForGroupWithFamilyLevelAccess);
  // --- qualifier-level grant, check, revoke, re-check ---
  grantOnTable(TEST_UTIL,TESTGROUP_1_NAME,tableName,TEST_FAMILY,Q1,Action.READ);
  verifyAllowed(TESTGROUP1_USER1,scanTableActionForGroupWithQualifierLevelAccess);
  verifyDenied(TESTGROUP1_USER1,scanFamilyActionForGroupWithQualifierLevelAccess);
  verifyDenied(TESTGROUP1_USER1,scanQualifierActionForGroupWithQualifierLevelAccess);
  verifyDenied(TESTGROUP2_USER1,scanTableActionForGroupWithQualifierLevelAccess);
  verifyDenied(TESTGROUP2_USER1,scanFamilyActionForGroupWithQualifierLevelAccess);
  verifyDenied(TESTGROUP2_USER1,scanQualifierActionForGroupWithQualifierLevelAccess);
  revokeFromTable(TEST_UTIL,TESTGROUP_1_NAME,tableName,TEST_FAMILY,Q1);
  verifyDenied(TESTGROUP1_USER1,scanTableActionForGroupWithQualifierLevelAccess);
}

Class: org.apache.hadoop.hbase.security.access.TestAccessController3

TestCleaner InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * Shuts the mini cluster down after the test class completes and verifies
 * that the region server aborted due to the FaultyAccessController.
 *
 * @throws Exception if cleanup or cluster shutdown fails
 */
@AfterClass
public static void tearDownAfterClass() throws Exception {
  HRegionServer rs = null;
  // Keep a reference to the last region server thread; its aborted flag is
  // checked after shutdown.
  for (JVMClusterUtil.RegionServerThread thread :
      TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
    rs = thread.getRegionServer();
  }
  cleanUp();
  TEST_UTIL.shutdownMiniCluster();
  // FIX: fail with a clear message instead of an NPE when the cluster had no
  // region server threads and rs was never assigned.
  assertTrue("no region server found in the mini cluster", rs != null);
  assertTrue("region server should have aborted due to FaultyAccessController", rs.isAborted());
}

Class: org.apache.hadoop.hbase.security.access.TestCellACLWithMultipleVersions

TestCleaner InternalCallVerifier EqualityVerifier HybridVerifier 
/** Drops the test table and checks that no ACL entries for it remain. */
@After
public void tearDown() throws Exception {
  final TableName name = TEST_TABLE.getTableName();
  try {
    TEST_UTIL.deleteTable(name);
  } catch (TableNotFoundException ex) {
    LOG.info("Test deleted table " + name);
  }
  assertEquals(0, AccessControlLists.getTablePermissions(conf, name).size());
}

Class: org.apache.hadoop.hbase.security.access.TestCellACLs

TestCleaner InternalCallVerifier EqualityVerifier HybridVerifier 
/** Removes the test table; a missing table is fine, but its ACLs must be gone. */
@After
public void tearDown() throws Exception {
  final TableName tableName = TEST_TABLE.getTableName();
  try {
    TEST_UTIL.deleteTable(tableName);
  } catch (TableNotFoundException ex) {
    LOG.info("Test deleted table " + tableName);
  }
  assertEquals(0, AccessControlLists.getTablePermissions(conf, tableName).size());
}

Class: org.apache.hadoop.hbase.security.access.TestNamespaceCommands

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that grant/revoke on a namespace adds and removes the expected
 * entries both in the acl table and in the namespace permission map.
 *
 * @throws Exception on any HBase access failure
 */
@Test
public void testAclTableEntries() throws Exception {
  String userTestNamespace = "userTestNsp";
  // FIX: try-with-resources replaces the manual try/finally close.
  try (Table acl = UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME)) {
    // FIX: the permissions were fetched twice in a row; one fetch suffices.
    // Also restore the type parameters dropped from the declarations (the
    // raw List would not compile against getNamespace() etc. below).
    ListMultimap<String, TablePermission> perms =
        AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
    for (Map.Entry<String, TablePermission> entry : perms.entries()) {
      LOG.debug(entry);
    }
    assertEquals(6, perms.size());
    // Grant WRITE and expect exactly one additional permission entry.
    grantOnNamespace(UTIL, userTestNamespace, TEST_NAMESPACE, Permission.Action.WRITE);
    Result result = acl.get(new Get(Bytes.toBytes(userTestNamespace)));
    assertTrue(result != null);
    perms = AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
    assertEquals(7, perms.size());
    List<TablePermission> namespacePerms = perms.get(userTestNamespace);
    assertTrue(perms.containsKey(userTestNamespace));
    assertEquals(1, namespacePerms.size());
    // The new entry is namespace-scoped (no family/qualifier) with WRITE only.
    assertEquals(TEST_NAMESPACE, namespacePerms.get(0).getNamespace());
    assertEquals(null, namespacePerms.get(0).getFamily());
    assertEquals(null, namespacePerms.get(0).getQualifier());
    assertEquals(1, namespacePerms.get(0).getActions().length);
    assertEquals(Permission.Action.WRITE, namespacePerms.get(0).getActions()[0]);
    // Revoke and expect the count to drop back to the initial value.
    revokeFromNamespace(UTIL, userTestNamespace, TEST_NAMESPACE, Permission.Action.WRITE);
    perms = AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
    assertEquals(6, perms.size());
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Exercises listNamespaceDescriptors for the full matrix of test users and
 * asserts the number of namespaces each one is allowed to see.
 *
 * @throws Exception if any of the runAs invocations fails
 */
@Test
public void testListNamespaces() throws Exception {
  AccessTestAction listAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      // FIX: try-with-resources closes the admin and the connection even if
      // listNamespaceDescriptors throws (replaces the manual finally block),
      // and in the same order as before (admin first, then connection).
      try (Connection unmanagedConnection =
              ConnectionFactory.createConnection(UTIL.getConfiguration());
          Admin admin = unmanagedConnection.getAdmin()) {
        return Arrays.asList(admin.listNamespaceDescriptors());
      }
    }
  };
  verifyAllowed(listAction, SUPERUSER, USER_GLOBAL_ADMIN, USER_NS_ADMIN, USER_GROUP_ADMIN);
  // Admin-level users expect 4 namespaces; the namespace admin expects 2.
  assertEquals(4, ((List) SUPERUSER.runAs(listAction)).size());
  assertEquals(4, ((List) USER_GLOBAL_ADMIN.runAs(listAction)).size());
  assertEquals(4, ((List) USER_GROUP_ADMIN.runAs(listAction)).size());
  assertEquals(2, ((List) USER_NS_ADMIN.runAs(listAction)).size());
  // Every non-admin user expects an empty listing.
  assertEquals(0, ((List) USER_GLOBAL_CREATE.runAs(listAction)).size());
  assertEquals(0, ((List) USER_GLOBAL_WRITE.runAs(listAction)).size());
  assertEquals(0, ((List) USER_GLOBAL_READ.runAs(listAction)).size());
  assertEquals(0, ((List) USER_GLOBAL_EXEC.runAs(listAction)).size());
  assertEquals(0, ((List) USER_NS_CREATE.runAs(listAction)).size());
  assertEquals(0, ((List) USER_NS_WRITE.runAs(listAction)).size());
  assertEquals(0, ((List) USER_NS_READ.runAs(listAction)).size());
  assertEquals(0, ((List) USER_NS_EXEC.runAs(listAction)).size());
  assertEquals(0, ((List) USER_TABLE_CREATE.runAs(listAction)).size());
  assertEquals(0, ((List) USER_TABLE_WRITE.runAs(listAction)).size());
  assertEquals(0, ((List) USER_GROUP_CREATE.runAs(listAction)).size());
  assertEquals(0, ((List) USER_GROUP_READ.runAs(listAction)).size());
  assertEquals(0, ((List) USER_GROUP_WRITE.runAs(listAction)).size());
}

Class: org.apache.hadoop.hbase.security.access.TestScanEarlyTermination

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier 
// Exercises scans under mixed family-level and per-cell ACLs: USER_OTHER holds
// family-level READ on TEST_FAMILY1 only, while the owner writes TEST_FAMILY2
// cells carrying their own cell ACLs. The asserts pin which columns each scan
// may return and which scans are denied outright.
@Test public void testEarlyScanTermination() throws Exception {
  grantOnTable(TEST_UTIL,USER_OTHER.getShortName(),TEST_TABLE.getTableName(),TEST_FAMILY1,null,Action.READ);
  // Owner writes one TEST_FAMILY1 cell and two TEST_FAMILY2 cells; the
  // TEST_FAMILY2 cells get explicit cell ACLs for USER_OTHER (READ on Q1,
  // an empty Permission on Q2).
  verifyAllowed(new AccessTestAction(){
    @Override public Object run() throws Exception {
      // NOTE(review): setting a random "testkey" value before each connection
      // appears intended to defeat connection reuse between actions — confirm.
      conf.set("testkey",UUID.randomUUID().toString());
      Connection connection=ConnectionFactory.createConnection(conf);
      Table t=connection.getTable(TEST_TABLE.getTableName());
      try {
        Put put=new Put(TEST_ROW).addColumn(TEST_FAMILY1,TEST_Q1,ZERO);
        t.put(put);
        put=new Put(TEST_ROW).addColumn(TEST_FAMILY2,TEST_Q1,ZERO);
        put.setACL(USER_OTHER.getShortName(),new Permission(Action.READ));
        t.put(put);
        put=new Put(TEST_ROW).addColumn(TEST_FAMILY2,TEST_Q2,ZERO);
        put.setACL(USER_OTHER.getShortName(),new Permission());
        t.put(put);
      } finally {
        t.close();
        connection.close();
      }
      return null;
    }
  } ,USER_OWNER);
  // Scanning only TEST_FAMILY1 is allowed and must not leak TEST_FAMILY2:Q1.
  verifyAllowed(new AccessTestAction(){
    @Override public Object run() throws Exception {
      conf.set("testkey",UUID.randomUUID().toString());
      Connection connection=ConnectionFactory.createConnection(conf);
      Table t=connection.getTable(TEST_TABLE.getTableName());
      try {
        Scan scan=new Scan().addFamily(TEST_FAMILY1);
        Result result=t.getScanner(scan).next();
        if (result != null) {
          assertTrue("Improper exclusion",result.containsColumn(TEST_FAMILY1,TEST_Q1));
          assertFalse("Improper inclusion",result.containsColumn(TEST_FAMILY2,TEST_Q1));
          return result.listCells();
        }
        return null;
      } finally {
        t.close();
        connection.close();
      }
    }
  } ,USER_OTHER);
  // An unrestricted scan is also allowed, with the same visibility outcome.
  verifyAllowed(new AccessTestAction(){
    @Override public Object run() throws Exception {
      conf.set("testkey",UUID.randomUUID().toString());
      Connection connection=ConnectionFactory.createConnection(conf);
      Table t=connection.getTable(TEST_TABLE.getTableName());
      try {
        Scan scan=new Scan();
        Result result=t.getScanner(scan).next();
        if (result != null) {
          assertTrue("Improper exclusion",result.containsColumn(TEST_FAMILY1,TEST_Q1));
          assertFalse("Improper inclusion",result.containsColumn(TEST_FAMILY2,TEST_Q1));
          return result.listCells();
        }
        return null;
      } finally {
        t.close();
        connection.close();
      }
    }
  } ,USER_OTHER);
  // Explicitly scanning TEST_FAMILY2 (no family-level grant) must be denied.
  verifyDenied(new AccessTestAction(){
    @Override public Object run() throws Exception {
      conf.set("testkey",UUID.randomUUID().toString());
      Connection connection=ConnectionFactory.createConnection(conf);
      Table t=connection.getTable(TEST_TABLE.getTableName());
      try {
        Scan scan=new Scan().addFamily(TEST_FAMILY2);
        Result result=t.getScanner(scan).next();
        if (result != null) {
          return result.listCells();
        }
        return null;
      } finally {
        t.close();
        connection.close();
      }
    }
  } ,USER_OTHER);
  // After a qualifier-level grant on TEST_FAMILY2:TEST_Q2, that one extra
  // cell becomes visible in a full scan while TEST_FAMILY2:TEST_Q1 stays
  // hidden.
  grantOnTable(TEST_UTIL,USER_OTHER.getShortName(),TEST_TABLE.getTableName(),TEST_FAMILY2,TEST_Q2,Action.READ);
  verifyAllowed(new AccessTestAction(){
    @Override public Object run() throws Exception {
      conf.set("testkey",UUID.randomUUID().toString());
      Connection connection=ConnectionFactory.createConnection(conf);
      Table t=connection.getTable(TEST_TABLE.getTableName());
      try {
        Scan scan=new Scan();
        Result result=t.getScanner(scan).next();
        if (result != null) {
          assertTrue("Improper exclusion",result.containsColumn(TEST_FAMILY1,TEST_Q1));
          assertFalse("Improper inclusion",result.containsColumn(TEST_FAMILY2,TEST_Q1));
          assertTrue("Improper exclusion",result.containsColumn(TEST_FAMILY2,TEST_Q2));
          return result.listCells();
        }
        return null;
      } finally {
        t.close();
        connection.close();
      }
    }
  } ,USER_OTHER);
}

TestCleaner InternalCallVerifier EqualityVerifier HybridVerifier 
/** Cleans up the per-test table and verifies its permissions were purged. */
@After
public void tearDown() throws Exception {
  final TableName table = TEST_TABLE.getTableName();
  try {
    TEST_UTIL.deleteTable(table);
  } catch (TableNotFoundException ex) {
    LOG.info("Test deleted table " + table);
  }
  assertEquals(0, AccessControlLists.getTablePermissions(conf, table).size());
}

Class: org.apache.hadoop.hbase.security.access.TestTablePermissions

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * The current user must keep global ADMIN authorization while a stream of
 * unrelated user permissions is being added through AccessControlLists.
 */
@Test
public void testAuthManager() throws Exception {
  Configuration configuration = UTIL.getConfiguration();
  TableAuthManager manager = TableAuthManager.getOrCreate(ZKW, configuration);
  User current = User.getCurrent();
  assertTrue(manager.authorize(current, Permission.Action.ADMIN));
  for (int iter = 1; iter <= 50; iter++) {
    // Each iteration adds a fresh synthetic user with a full action set.
    UserPermission extra = new UserPermission(Bytes.toBytes("testauth" + iter),
        Permission.Action.ADMIN, Permission.Action.READ, Permission.Action.WRITE);
    AccessControlLists.addUserPermission(configuration, extra);
    // The pre-existing authorization must survive every update.
    assertTrue("Failed current user auth check on iter " + iter,
        manager.authorize(current, Permission.Action.ADMIN));
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Global (table-less) grants for several users must round-trip through
 * AccessControlLists with exactly the granted action sets.
 *
 * @throws Exception on any HBase access failure
 */
@Test
public void testGlobalPermission() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  AccessControlLists.addUserPermission(conf, new UserPermission(Bytes.toBytes("user1"),
      Permission.Action.READ, Permission.Action.WRITE));
  AccessControlLists.addUserPermission(conf,
      new UserPermission(Bytes.toBytes("user2"), Permission.Action.CREATE));
  AccessControlLists.addUserPermission(conf, new UserPermission(Bytes.toBytes("user3"),
      Permission.Action.ADMIN, Permission.Action.READ, Permission.Action.CREATE));
  // A null table selects the global permission set.
  // FIX: restore type parameters dropped from the raw declarations.
  ListMultimap<String, TablePermission> perms = AccessControlLists.getTablePermissions(conf, null);
  List<TablePermission> user1Perms = perms.get("user1");
  assertEquals("Should have 1 permission for user1", 1, user1Perms.size());
  // FIX: compare array CONTENT via Arrays.asList; the previous calls relied
  // on the deprecated assertEquals(Object[], Object[]) overload.
  assertEquals("user1 should have WRITE permission",
      Arrays.asList(Permission.Action.READ, Permission.Action.WRITE),
      Arrays.asList(user1Perms.get(0).getActions()));
  List<TablePermission> user2Perms = perms.get("user2");
  assertEquals("Should have 1 permission for user2", 1, user2Perms.size());
  assertEquals("user2 should have CREATE permission",
      Arrays.asList(Permission.Action.CREATE),
      Arrays.asList(user2Perms.get(0).getActions()));
  List<TablePermission> user3Perms = perms.get("user3");
  assertEquals("Should have 1 permission for user3", 1, user3Perms.size());
  assertEquals("user3 should have ADMIN, READ, CREATE permission",
      Arrays.asList(Permission.Action.ADMIN, Permission.Action.READ, Permission.Action.CREATE),
      Arrays.asList(user3Perms.get(0).getActions()));
}

InternalCallVerifier BooleanVerifier 
/**
 * TablePermission.equals must be symmetric, ignore action ordering, and
 * distinguish differences in table, family, qualifier and action set.
 */
@Test
public void testEquals() throws Exception {
  // Identical table-scope permissions: equal both ways.
  TablePermission first = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ);
  TablePermission second = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ);
  assertTrue(first.equals(second));
  assertTrue(second.equals(first));
  // Same actions listed in a different order: still equal.
  first = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ,
      TablePermission.Action.WRITE);
  second = new TablePermission(TEST_TABLE, null, TablePermission.Action.WRITE,
      TablePermission.Action.READ);
  assertTrue(first.equals(second));
  assertTrue(second.equals(first));
  // Order-insensitivity also holds with a family...
  first = new TablePermission(TEST_TABLE, TEST_FAMILY, TablePermission.Action.READ,
      TablePermission.Action.WRITE);
  second = new TablePermission(TEST_TABLE, TEST_FAMILY, TablePermission.Action.WRITE,
      TablePermission.Action.READ);
  assertTrue(first.equals(second));
  assertTrue(second.equals(first));
  // ...and with a family plus qualifier.
  first = new TablePermission(TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER,
      TablePermission.Action.READ, TablePermission.Action.WRITE);
  second = new TablePermission(TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER,
      TablePermission.Action.WRITE, TablePermission.Action.READ);
  assertTrue(first.equals(second));
  assertTrue(second.equals(first));
  // A differing family breaks equality.
  first = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ);
  second = new TablePermission(TEST_TABLE, TEST_FAMILY, TablePermission.Action.READ);
  assertFalse(first.equals(second));
  assertFalse(second.equals(first));
  // A differing action breaks equality.
  first = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ);
  second = new TablePermission(TEST_TABLE, null, TablePermission.Action.WRITE);
  assertFalse(first.equals(second));
  assertFalse(second.equals(first));
  // A superset of actions is not equal to a subset.
  second = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ,
      TablePermission.Action.WRITE);
  assertFalse(first.equals(second));
  assertFalse(second.equals(first));
  // A differing table breaks equality.
  first = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ);
  second = new TablePermission(TEST_TABLE2, null, TablePermission.Action.READ);
  assertFalse(first.equals(second));
  assertFalse(second.equals(first));
  // An empty action set is not equal to a non-empty one.
  second = new TablePermission(TEST_TABLE, null);
  assertFalse(first.equals(second));
  assertFalse(second.equals(first));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the basic write/read path of AccessControlLists: permissions
 * written at table, family and qualifier scope must be read back with the
 * same scope and action set, both per-table and via loadAll().
 *
 * @throws Exception on any HBase access failure
 */
@Test
public void testBasicWrite() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  // Write permissions at three different scopes for three users.
  AccessControlLists.addUserPermission(conf, new UserPermission(Bytes.toBytes("george"),
      TEST_TABLE, null, (byte[]) null, UserPermission.Action.READ, UserPermission.Action.WRITE));
  AccessControlLists.addUserPermission(conf, new UserPermission(Bytes.toBytes("hubert"),
      TEST_TABLE, null, (byte[]) null, UserPermission.Action.READ));
  AccessControlLists.addUserPermission(conf, new UserPermission(Bytes.toBytes("humphrey"),
      TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER, UserPermission.Action.READ));
  // FIX: restore the type parameters; the declarations had degenerated to
  // raw/mangled types ("Map>" does not even parse, and a raw List makes
  // "permission = userPerms.get(0)" fail to compile).
  ListMultimap<String, TablePermission> perms =
      AccessControlLists.getTablePermissions(conf, TEST_TABLE);
  List<TablePermission> userPerms = perms.get("george");
  assertNotNull("Should have permissions for george", userPerms);
  assertEquals("Should have 1 permission for george", 1, userPerms.size());
  TablePermission permission = userPerms.get(0);
  assertEquals("Permission should be for " + TEST_TABLE, TEST_TABLE, permission.getTableName());
  assertNull("Column family should be empty", permission.getFamily());
  assertNotNull(permission.getActions());
  assertEquals(2, permission.getActions().length);
  List<TablePermission.Action> actions = Arrays.asList(permission.getActions());
  assertTrue(actions.contains(TablePermission.Action.READ));
  assertTrue(actions.contains(TablePermission.Action.WRITE));
  // hubert: table scope, READ only.
  userPerms = perms.get("hubert");
  assertNotNull("Should have permissions for hubert", userPerms);
  assertEquals("Should have 1 permission for hubert", 1, userPerms.size());
  permission = userPerms.get(0);
  assertEquals("Permission should be for " + TEST_TABLE, TEST_TABLE, permission.getTableName());
  assertNull("Column family should be empty", permission.getFamily());
  assertNotNull(permission.getActions());
  assertEquals(1, permission.getActions().length);
  actions = Arrays.asList(permission.getActions());
  assertTrue(actions.contains(TablePermission.Action.READ));
  assertFalse(actions.contains(TablePermission.Action.WRITE));
  // humphrey: family + qualifier scope, READ only.
  userPerms = perms.get("humphrey");
  assertNotNull("Should have permissions for humphrey", userPerms);
  assertEquals("Should have 1 permission for humphrey", 1, userPerms.size());
  permission = userPerms.get(0);
  assertEquals("Permission should be for " + TEST_TABLE, TEST_TABLE, permission.getTableName());
  assertTrue("Permission should be for family " + TEST_FAMILY,
      Bytes.equals(TEST_FAMILY, permission.getFamily()));
  assertTrue("Permission should be for qualifier " + TEST_QUALIFIER,
      Bytes.equals(TEST_QUALIFIER, permission.getQualifier()));
  assertNotNull(permission.getActions());
  assertEquals(1, permission.getActions().length);
  actions = Arrays.asList(permission.getActions());
  assertTrue(actions.contains(TablePermission.Action.READ));
  assertFalse(actions.contains(TablePermission.Action.WRITE));
  // Add a grant on a second table and verify loadAll() covers both tables.
  AccessControlLists.addUserPermission(conf, new UserPermission(Bytes.toBytes("hubert"),
      TEST_TABLE2, null, (byte[]) null, TablePermission.Action.READ,
      TablePermission.Action.WRITE));
  Map<byte[], ListMultimap<String, TablePermission>> allPerms = AccessControlLists.loadAll(conf);
  assertEquals("Full permission map should have entries for both test tables", 2,
      allPerms.size());
  userPerms = allPerms.get(TEST_TABLE.getName()).get("hubert");
  assertNotNull(userPerms);
  assertEquals(1, userPerms.size());
  permission = userPerms.get(0);
  assertEquals(TEST_TABLE, permission.getTableName());
  assertEquals(1, permission.getActions().length);
  assertEquals(TablePermission.Action.READ, permission.getActions()[0]);
  userPerms = allPerms.get(TEST_TABLE2.getName()).get("hubert");
  assertNotNull(userPerms);
  assertEquals(1, userPerms.size());
  permission = userPerms.get(0);
  assertEquals(TEST_TABLE2, permission.getTableName());
  assertEquals(2, permission.getActions().length);
  actions = Arrays.asList(permission.getActions());
  assertTrue(actions.contains(TablePermission.Action.READ));
  assertTrue(actions.contains(TablePermission.Action.WRITE));
}

Class: org.apache.hadoop.hbase.security.access.TestWithDisabledAuthorization

TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier 
// Creates the per-test table (owned by USER_OWNER, up to 100 cell versions,
// pre-split at "s"), wires a region coprocessor environment for the
// AccessController, and issues the baseline grants used by the tests.
@Before public void setUp() throws Exception {
  Admin admin=TEST_UTIL.getHBaseAdmin();
  HTableDescriptor htd=new HTableDescriptor(TEST_TABLE.getTableName());
  HColumnDescriptor hcd=new HColumnDescriptor(TEST_FAMILY);
  // Keep up to 100 versions per cell.
  hcd.setMaxVersions(100);
  htd.addFamily(hcd);
  htd.setOwner(USER_OWNER);
  // Pre-split at "s" so the table starts with more than one region.
  admin.createTable(htd,new byte[][]{Bytes.toBytes("s")});
  TEST_UTIL.waitUntilAllRegionsAssigned(TEST_TABLE.getTableName());
  // Attach an AccessController environment to the first region so tests can
  // drive coprocessor hooks directly through RCP_ENV.
  Region region=TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE.getTableName()).get(0);
  RegionCoprocessorHost rcpHost=region.getCoprocessorHost();
  RCP_ENV=rcpHost.createEnvironment(AccessController.class,ACCESS_CONTROLLER,Coprocessor.PRIORITY_HIGHEST,1,TEST_UTIL.getConfiguration());
  // Baseline grants: USER_ADMIN gets everything globally; the remaining
  // users get progressively narrower table/family/qualifier scopes.
  grantGlobal(TEST_UTIL,USER_ADMIN.getShortName(),Permission.Action.ADMIN,Permission.Action.CREATE,Permission.Action.READ,Permission.Action.WRITE);
  grantOnTable(TEST_UTIL,USER_RW.getShortName(),TEST_TABLE.getTableName(),TEST_FAMILY,null,Permission.Action.READ,Permission.Action.WRITE);
  grantOnTable(TEST_UTIL,USER_CREATE.getShortName(),TEST_TABLE.getTableName(),null,null,Permission.Action.CREATE,Permission.Action.READ,Permission.Action.WRITE);
  grantOnTable(TEST_UTIL,USER_RO.getShortName(),TEST_TABLE.getTableName(),TEST_FAMILY,null,Permission.Action.READ);
  grantOnTable(TEST_UTIL,USER_QUAL.getShortName(),TEST_TABLE.getTableName(),TEST_FAMILY,TEST_Q1,Permission.Action.READ,Permission.Action.WRITE);
  // Five table-scoped grants above -> five permission entries expected.
  assertEquals(5,AccessControlLists.getTablePermissions(TEST_UTIL.getConfiguration(),TEST_TABLE.getTableName()).size());
}

TestCleaner InternalCallVerifier EqualityVerifier HybridVerifier 
/** Deletes the test table and confirms no table or namespace ACLs remain. */
@After
public void tearDown() throws Exception {
  try {
    deleteTable(TEST_UTIL, TEST_TABLE.getTableName());
  } catch (TableNotFoundException ex) {
    LOG.info("Test deleted table " + TEST_TABLE.getTableName());
  }
  final Configuration configuration = TEST_UTIL.getConfiguration();
  assertEquals(0,
      AccessControlLists.getTablePermissions(configuration, TEST_TABLE.getTableName()).size());
  assertEquals(0, AccessControlLists.getNamespacePermissions(configuration,
      TEST_TABLE.getTableName().getNamespaceAsString()).size());
}

Class: org.apache.hadoop.hbase.security.access.TestZKPermissionsWatcher

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Verifies that table permission changes written through one TableAuthManager
// (AUTH_A or AUTH_B) propagate to the other one via the ZK permissions
// watcher, for both the "george" and "hubert" test users.
@Test public void testPermissionsWatcher() throws Exception {
  Configuration conf=UTIL.getConfiguration();
  User george=User.createUserForTesting(conf,"george",new String[]{});
  User hubert=User.createUserForTesting(conf,"hubert",new String[]{});
  // Initially neither user is authorized on either manager.
  assertFalse(AUTH_A.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.READ));
  assertFalse(AUTH_A.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.WRITE));
  assertFalse(AUTH_A.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.READ));
  assertFalse(AUTH_A.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.WRITE));
  assertFalse(AUTH_B.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.READ));
  assertFalse(AUTH_B.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.WRITE));
  assertFalse(AUTH_B.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.READ));
  assertFalse(AUTH_B.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.WRITE));
  // Grant george READ/WRITE through AUTH_A...
  List acl=new ArrayList();
  acl.add(new TablePermission(TEST_TABLE,null,TablePermission.Action.READ,TablePermission.Action.WRITE));
  final long mtimeB=AUTH_B.getMTime();
  AUTH_A.setTableUserPermissions(george.getShortName(),TEST_TABLE,acl);
  // ...and wait until AUTH_B's cache mtime advances, i.e. the ZK update has
  // been observed on the other manager.
  UTIL.waitFor(10000,100,new Predicate(){
    @Override public boolean evaluate() throws Exception {
      return AUTH_B.getMTime() > mtimeB;
    }
  } );
  Thread.sleep(1000);
  // george now has READ/WRITE on both managers; hubert still has nothing.
  assertTrue(AUTH_A.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.READ));
  assertTrue(AUTH_A.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.WRITE));
  assertTrue(AUTH_B.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.READ));
  assertTrue(AUTH_B.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.WRITE));
  assertFalse(AUTH_A.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.READ));
  assertFalse(AUTH_A.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.WRITE));
  assertFalse(AUTH_B.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.READ));
  assertFalse(AUTH_B.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.WRITE));
  // Now grant hubert READ (only) through the OTHER manager, AUTH_B...
  acl=new ArrayList();
  acl.add(new TablePermission(TEST_TABLE,null,TablePermission.Action.READ));
  final long mtimeA=AUTH_A.getMTime();
  AUTH_B.setTableUserPermissions("hubert",TEST_TABLE,acl);
  // ...and wait for AUTH_A to observe the change.
  UTIL.waitFor(10000,100,new Predicate(){
    @Override public boolean evaluate() throws Exception {
      return AUTH_A.getMTime() > mtimeA;
    }
  } );
  Thread.sleep(1000);
  // george keeps READ/WRITE; hubert gains READ but not WRITE, on both sides.
  assertTrue(AUTH_A.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.READ));
  assertTrue(AUTH_A.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.WRITE));
  assertTrue(AUTH_B.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.READ));
  assertTrue(AUTH_B.authorizeUser(george,TEST_TABLE,null,TablePermission.Action.WRITE));
  assertTrue(AUTH_A.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.READ));
  assertFalse(AUTH_A.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.WRITE));
  assertTrue(AUTH_B.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.READ));
  assertFalse(AUTH_B.authorizeUser(hubert,TEST_TABLE,null,TablePermission.Action.WRITE));
}

Class: org.apache.hadoop.hbase.security.token.TestAuthenticationKey

InternalCallVerifier EqualityVerifier 
/**
 * AuthenticationKey equals/hashCode must incorporate the key id, the expiry
 * timestamp and the secret key material.
 *
 * @throws UnsupportedEncodingException never; "UTF-8" is always supported
 */
@Test
public void test() throws UnsupportedEncodingException {
  SecretKey secret = Mockito.mock(SecretKey.class);
  Mockito.when(secret.getEncoded()).thenReturn("secret".getBytes("UTF-8"));
  AuthenticationKey key = new AuthenticationKey(0, 1234, secret);
  // Identical id/expiry/secret: equal and same hash code.
  assertEquals(key.hashCode(), new AuthenticationKey(0, 1234, secret).hashCode());
  assertEquals(key, new AuthenticationKey(0, 1234, secret));
  // A different key id must change both equality and the hash.
  AuthenticationKey otherID = new AuthenticationKey(1, 1234, secret);
  assertNotEquals(key.hashCode(), otherID.hashCode());
  assertNotEquals(key, otherID);
  // A different expiry must change both equality and the hash.
  AuthenticationKey otherExpiry = new AuthenticationKey(0, 8765, secret);
  assertNotEquals(key.hashCode(), otherExpiry.hashCode());
  assertNotEquals(key, otherExpiry);
  SecretKey other = Mockito.mock(SecretKey.class);
  // FIX: stub the new "other" mock. The original re-stubbed "secret" here,
  // leaving other.getEncoded() returning null, so the differing-secret case
  // was not exercised as intended.
  Mockito.when(other.getEncoded()).thenReturn("other".getBytes("UTF-8"));
  AuthenticationKey otherSecret = new AuthenticationKey(0, 1234, other);
  assertNotEquals(key.hashCode(), otherSecret.hashCode());
  assertNotEquals(key, otherSecret);
}

Class: org.apache.hadoop.hbase.security.token.TestTokenAuthentication

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A generated token must carry the requested username, and its password must
 * match what the secret manager derives for the token's identifier.
 */
@Test
public void testTokenCreation() throws Exception {
  Token token = secretManager.generateToken("testuser");
  // Decode the serialized identifier back into a typed object.
  AuthenticationTokenIdentifier identifier = new AuthenticationTokenIdentifier();
  Writables.getWritable(token.getIdentifier(), identifier);
  assertEquals("Token username should match", "testuser", identifier.getUsername());
  byte[] expectedPassword = secretManager.retrievePassword(identifier);
  assertTrue("Token password and password from secret manager should match",
      Bytes.equals(token.getPassword(), expectedPassword));
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * When a user already carries an authentication token for the cluster,
 * TokenUtil.addTokenIfMissing must be a no-op and the original token must
 * remain selectable.
 *
 * @throws Exception if token generation or the connection fails
 */
@Test
public void testUseExistingToken() throws Exception {
  User user = User.createUserForTesting(TEST_UTIL.getConfiguration(), "testuser2",
      new String[]{"testgroup"});
  Token token = secretManager.generateToken(user.getName());
  assertNotNull(token);
  user.addToken(token);
  // Verify the token can be selected from the user's credentials.
  Token firstToken =
      new AuthenticationTokenSelector().selectToken(token.getService(), user.getTokens());
  assertNotNull(firstToken);
  assertEquals(token, firstToken);
  // FIX: try-with-resources replaces the manual try/finally close.
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
    // The token is already present, so nothing new should be added...
    assertFalse(TokenUtil.addTokenIfMissing(conn, user));
    // ...and the selectable token must be unchanged.
    Token secondToken =
        new AuthenticationTokenSelector().selectToken(token.getService(), user.getTokens());
    assertEquals(firstToken, secondToken);
  }
}

Class: org.apache.hadoop.hbase.security.token.TestZKSecretWatcher

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises key rolling and propagation: the key master rolls keys, slaves pick them up
 * through ZooKeeper, expired keys are removed everywhere, and after the master stops one
 * of the slaves is elected as the new key master and continues rolling keys.
 */
@Test
public void testKeyUpdate() throws Exception {
  assertTrue(KEY_MASTER.isMaster());
  assertFalse(KEY_SLAVE.isMaster());

  KEY_MASTER.rollCurrentKey();
  AuthenticationKey key1 = KEY_MASTER.getCurrentKey();
  assertNotNull(key1);
  LOG.debug("Master current key: " + key1.getKeyId());

  // Give the slave time to pick up the new key from ZK.
  Thread.sleep(1000);
  AuthenticationKey slaveCurrent = KEY_SLAVE.getCurrentKey();
  assertNotNull(slaveCurrent);
  assertEquals(key1, slaveCurrent);
  LOG.debug("Slave current key: " + slaveCurrent.getKeyId());

  // Roll two more keys on the master.
  KEY_MASTER.rollCurrentKey();
  AuthenticationKey key2 = KEY_MASTER.getCurrentKey();
  LOG.debug("Master new current key: " + key2.getKeyId());
  KEY_MASTER.rollCurrentKey();
  AuthenticationKey key3 = KEY_MASTER.getCurrentKey();
  LOG.debug("Master new current key: " + key3.getKeyId());

  // Force the first key to expire and confirm the master removes it.
  key1.setExpiration(EnvironmentEdgeManager.currentTime() - 1000);
  KEY_MASTER.removeExpiredKeys();
  assertNull(KEY_MASTER.getKey(key1.getKeyId()));

  // Wait for the slave to sync, then check it sees key2/key3 but not the expired key1.
  KEY_SLAVE.getLatch().await();
  AuthenticationKey slave2 = KEY_SLAVE.getKey(key2.getKeyId());
  assertNotNull(slave2);
  assertEquals(key2, slave2);
  AuthenticationKey slave3 = KEY_SLAVE.getKey(key3.getKeyId());
  assertNotNull(slave3);
  assertEquals(key3, slave3);
  slaveCurrent = KEY_SLAVE.getCurrentKey();
  assertEquals(key3, slaveCurrent);
  LOG.debug("Slave current key: " + slaveCurrent.getKeyId());
  assertNull(KEY_SLAVE.getKey(key1.getKeyId()));

  // A freshly started slave must load the same key state from ZK.
  Configuration conf = TEST_UTIL.getConfiguration();
  ZooKeeperWatcher zk = newZK(conf, "server3", new MockAbortable());
  KEY_SLAVE2 = new AuthenticationTokenSecretManager(conf, zk, "server3",
      60 * 60 * 1000, 60 * 1000);
  KEY_SLAVE2.start();
  Thread.sleep(1000);
  slave2 = KEY_SLAVE2.getKey(key2.getKeyId());
  assertNotNull(slave2);
  assertEquals(key2, slave2);
  slave3 = KEY_SLAVE2.getKey(key3.getKeyId());
  assertNotNull(slave3);
  assertEquals(key3, slave3);
  slaveCurrent = KEY_SLAVE2.getCurrentKey();
  assertEquals(key3, slaveCurrent);
  assertNull(KEY_SLAVE2.getKey(key1.getKeyId()));

  // Stop the master; one of the slaves should take over as key master.
  KEY_MASTER.stop();
  Thread.sleep(1000);
  assertFalse(KEY_MASTER.isMaster());

  AuthenticationTokenSecretManager newMaster = waitForNewMaster(
      new AuthenticationTokenSecretManager[] { KEY_SLAVE, KEY_SLAVE2 });
  assertNotNull(newMaster);
  AuthenticationKey current = newMaster.getCurrentKey();
  assertTrue(current.getKeyId() >= slaveCurrent.getKeyId());
  LOG.debug("New master, current key: " + current.getKeyId());
  newMaster.rollCurrentKey();
  AuthenticationKey newCurrent = newMaster.getCurrentKey();
  LOG.debug("New master, rolled new current key: " + newCurrent.getKeyId());
  assertTrue(newCurrent.getKeyId() > current.getKeyId());

  // Start a third slave, stop the current master, and verify a second failover.
  ZooKeeperWatcher zk3 = newZK(conf, "server4", new MockAbortable());
  KEY_SLAVE3 = new AuthenticationTokenSecretManager(conf, zk3, "server4",
      60 * 60 * 1000, 60 * 1000);
  KEY_SLAVE3.start();
  Thread.sleep(5000);
  newMaster.stop();
  Thread.sleep(5000);
  assertFalse(newMaster.isMaster());

  newMaster = waitForNewMaster(new AuthenticationTokenSecretManager[] {
      KEY_SLAVE, KEY_SLAVE2, KEY_SLAVE3 });
  assertNotNull(newMaster);
  AuthenticationKey current2 = newMaster.getCurrentKey();
  assertTrue(current2.getKeyId() >= newCurrent.getKeyId());
  LOG.debug("New master 2, current key: " + current2.getKeyId());
  newMaster.rollCurrentKey();
  AuthenticationKey newCurrent2 = newMaster.getCurrentKey();
  LOG.debug("New master 2, rolled new current key: " + newCurrent2.getKeyId());
  assertTrue(newCurrent2.getKeyId() > current2.getKeyId());
}

/**
 * Polls the given managers (up to 5 attempts, 500ms apart) until one claims
 * mastership. Returns null if no manager becomes master within that window.
 * Extracted from testKeyUpdate, where this loop was duplicated verbatim.
 */
private static AuthenticationTokenSecretManager waitForNewMaster(
    AuthenticationTokenSecretManager[] mgrs) throws InterruptedException {
  AuthenticationTokenSecretManager newMaster = null;
  int tries = 0;
  while (newMaster == null && tries++ < 5) {
    for (AuthenticationTokenSecretManager mgr : mgrs) {
      if (mgr.isMaster()) {
        newMaster = mgr;
        break;
      }
    }
    if (newMaster == null) {
      Thread.sleep(500);
    }
  }
  return newMaster;
}

Class: org.apache.hadoop.hbase.security.token.TestZKSecretWatcherRefreshKeys

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * Pre-creates key znodes in ZooKeeper, then verifies that refreshKeys() makes
 * every one of them visible through the secret manager.
 */
@Test
public void testRefreshKeys() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  ZooKeeperWatcher zk = newZK(conf, "127.0.0.1", new MockAbortable());
  AuthenticationTokenSecretManager keyManager =
      new AuthenticationTokenSecretManager(conf, zk, "127.0.0.1", 60 * 60 * 1000, 60 * 1000);
  ZKSecretWatcher watcher = new ZKSecretWatcher(conf, zk, keyManager);

  // Start from a clean keys znode.
  ZKUtil.deleteChildrenRecursively(zk, watcher.getKeysParentZNode());

  // Write six keys (expiring 10 minutes from now) directly into ZK.
  int[] keyIds = { 1, 2, 3, 4, 5, 6 };
  for (int keyId : keyIds) {
    AuthenticationKey ak =
        new AuthenticationKey(keyId, System.currentTimeMillis() + 600 * 1000, null);
    ZKUtil.createWithParents(zk,
        ZKUtil.joinZNode(watcher.getKeysParentZNode(), Integer.toString(keyId)),
        Writables.getBytes(ak));
  }

  // Nothing is loaded until the watcher refreshes.
  Assert.assertNull(keyManager.getCurrentKey());
  watcher.refreshKeys();
  for (int keyId : keyIds) {
    Assert.assertNotNull(keyManager.getKey(keyId));
  }
}

Class: org.apache.hadoop.hbase.security.visibility.TestDefaultScanLabelGeneratorStack

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Verifies the default ScanLabelGenerator stack: a superuser scan returns all three cells
 * (SECRET, CONFIDENTIAL, unlabeled); a plain user's scans — with no authorizations, with
 * SECRET+CONFIDENTIAL requested, and with SECRET only — see progressively fewer cells.
 */
@Test
public void testDefaultScanLabelGeneratorStack() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  // Superuser writes three cells into one row: SECRET, CONFIDENTIAL, and unlabeled.
  SUPERUSER.runAs(new PrivilegedExceptionAction() {
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table table = TEST_UTIL.createTable(tableName, CF)) {
        Put put = new Put(ROW_1);
        put.addColumn(CF, Q1, HConstants.LATEST_TIMESTAMP, value1);
        put.setCellVisibility(new CellVisibility(SECRET));
        table.put(put);
        put = new Put(ROW_1);
        put.addColumn(CF, Q2, HConstants.LATEST_TIMESTAMP, value2);
        put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        table.put(put);
        put = new Put(ROW_1);
        put.addColumn(CF, Q3, HConstants.LATEST_TIMESTAMP, value3);
        table.put(put);
        return null;
      }
    }
  });
  // Superuser sees all three cells.
  SUPERUSER.runAs(new PrivilegedExceptionAction() {
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table table = connection.getTable(tableName)) {
        Scan s = new Scan();
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(1);
        assertTrue(next.length == 1);
        CellScanner cellScanner = next[0].cellScanner();
        assertNextCell(cellScanner, Q1, value1);
        assertNextCell(cellScanner, Q2, value2);
        assertNextCell(cellScanner, Q3, value3);
        return null;
      }
    }
  });
  TESTUSER.runAs(new PrivilegedExceptionAction() {
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table table = connection.getTable(tableName)) {
        // With no explicit authorizations the SECRET cell is hidden.
        Scan s = new Scan();
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(1);
        assertTrue(next.length == 1);
        CellScanner cellScanner = next[0].cellScanner();
        assertNextCell(cellScanner, Q2, value2);
        assertNextCell(cellScanner, Q3, value3);
        // Requesting SECRET+CONFIDENTIAL still yields only Q2 and Q3 for this user.
        Scan s1 = new Scan();
        s1.setAuthorizations(new Authorizations(new String[] { SECRET, CONFIDENTIAL }));
        ResultScanner scanner1 = table.getScanner(s1);
        Result[] next1 = scanner1.next(1);
        assertTrue(next1.length == 1);
        CellScanner cellScanner1 = next1[0].cellScanner();
        assertNextCell(cellScanner1, Q2, value2);
        assertNextCell(cellScanner1, Q3, value3);
        // Requesting SECRET only leaves just the unlabeled cell visible.
        Scan s2 = new Scan();
        s2.setAuthorizations(new Authorizations(new String[] { SECRET }));
        ResultScanner scanner2 = table.getScanner(s2);
        Result next2 = scanner2.next();
        CellScanner cellScanner2 = next2.cellScanner();
        assertNextCell(cellScanner2, Q3, value3);
        assertFalse(cellScanner2.advance());
        return null;
      }
    }
  });
}

/**
 * Advances the scanner one cell and asserts that it carries row ROW_1 with the given
 * qualifier and value. Extracted from the test body, which repeated this sequence
 * eight times.
 */
private void assertNextCell(CellScanner cellScanner, byte[] qualifier, byte[] value)
    throws Exception {
  cellScanner.advance();
  Cell current = cellScanner.current();
  assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
      current.getRowLength(), ROW_1, 0, ROW_1.length));
  assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(),
      current.getQualifierLength(), qualifier, 0, qualifier.length));
  assertTrue(Bytes.equals(current.getValueArray(), current.getValueOffset(),
      current.getValueLength(), value, 0, value.length));
}

Class: org.apache.hadoop.hbase.security.visibility.TestEnforcingScanLabelGenerator

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test public void testEnforcingScanLabelGenerator() throws Exception { final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); SUPERUSER.runAs(new PrivilegedExceptionAction(){ public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=TEST_UTIL.createTable(tableName,CF)){ Put put=new Put(ROW_1); put.addColumn(CF,Q1,HConstants.LATEST_TIMESTAMP,value); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); put=new Put(ROW_1); put.addColumn(CF,Q2,HConstants.LATEST_TIMESTAMP,value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); put=new Put(ROW_1); put.addColumn(CF,Q3,HConstants.LATEST_TIMESTAMP,value); table.put(put); return null; } } } ); SUPERUSER.runAs(new PrivilegedExceptionAction(){ public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Get get=new Get(ROW_1); Result result=table.get(get); assertTrue("Missing authorization",result.containsColumn(CF,Q1)); assertTrue("Missing authorization",result.containsColumn(CF,Q2)); assertTrue("Missing authorization",result.containsColumn(CF,Q3)); return null; } } } ); TESTUSER.runAs(new PrivilegedExceptionAction(){ public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Get get=new Get(ROW_1); get.setAuthorizations(new Authorizations(new String[]{SECRET,CONFIDENTIAL})); Result result=table.get(get); assertFalse("Inappropriate authorization",result.containsColumn(CF,Q1)); assertTrue("Missing authorization",result.containsColumn(CF,Q2)); assertTrue("Inappropriate filtering",result.containsColumn(CF,Q3)); get=new Get(ROW_1); result=table.get(get); assertFalse("Inappropriate authorization",result.containsColumn(CF,Q1)); assertTrue("Missing authorization",result.containsColumn(CF,Q2)); assertTrue("Inappropriate 
filtering",result.containsColumn(CF,Q3)); return null; } } } ); }

Class: org.apache.hadoop.hbase.security.visibility.TestExpressionExpander

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies ExpressionExpander.expand() normalization of visibility expressions, as the
// assertions below demonstrate: same-operator children are flattened (OR(OR(a,b),c) ->
// OR(a,b,c)), AND distributes over OR into an OR of ANDs, and NOT over OR becomes an
// AND of NOTs (De Morgan). exp1-exp3 are already-normal trees that pass through
// unchanged; later cases exercise nesting and distribution.
@Test public void testPositiveCases() throws Exception { ExpressionExpander expander=new ExpressionExpander(); NonLeafExpressionNode exp1=new NonLeafExpressionNode(Operator.NOT,new LeafExpressionNode("a")); ExpressionNode result=expander.expand(exp1); assertTrue(result instanceof NonLeafExpressionNode); NonLeafExpressionNode nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.NOT,nlResult.getOperator()); assertEquals("a",((LeafExpressionNode)nlResult.getChildExps().get(0)).getIdentifier()); NonLeafExpressionNode exp2=new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("a"),new LeafExpressionNode("b")); result=expander.expand(exp2); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.OR,nlResult.getOperator()); assertEquals(2,nlResult.getChildExps().size()); assertEquals("a",((LeafExpressionNode)nlResult.getChildExps().get(0)).getIdentifier()); assertEquals("b",((LeafExpressionNode)nlResult.getChildExps().get(1)).getIdentifier()); NonLeafExpressionNode exp3=new NonLeafExpressionNode(Operator.AND,new LeafExpressionNode("a"),new LeafExpressionNode("b")); result=expander.expand(exp3); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.AND,nlResult.getOperator()); assertEquals(2,nlResult.getChildExps().size()); assertEquals("a",((LeafExpressionNode)nlResult.getChildExps().get(0)).getIdentifier()); assertEquals("b",((LeafExpressionNode)nlResult.getChildExps().get(1)).getIdentifier()); NonLeafExpressionNode exp4=new NonLeafExpressionNode(Operator.OR,new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("a"),new LeafExpressionNode("b")),new LeafExpressionNode("c")); result=expander.expand(exp4); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.OR,nlResult.getOperator()); assertEquals(3,nlResult.getChildExps().size()); 
// exp4 flattened OR(OR(a,b),c) to OR(a,b,c); exp5 flattens nested AND the same way;
// exp6 distributes (a|b)&c into (a&c)|(b&c).
assertEquals("a",((LeafExpressionNode)nlResult.getChildExps().get(0)).getIdentifier()); assertEquals("b",((LeafExpressionNode)nlResult.getChildExps().get(1)).getIdentifier()); assertEquals("c",((LeafExpressionNode)nlResult.getChildExps().get(2)).getIdentifier()); NonLeafExpressionNode exp5=new NonLeafExpressionNode(Operator.AND,new NonLeafExpressionNode(Operator.AND,new LeafExpressionNode("a"),new LeafExpressionNode("b")),new LeafExpressionNode("c")); result=expander.expand(exp5); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.AND,nlResult.getOperator()); assertEquals(3,nlResult.getChildExps().size()); assertEquals("a",((LeafExpressionNode)nlResult.getChildExps().get(0)).getIdentifier()); assertEquals("b",((LeafExpressionNode)nlResult.getChildExps().get(1)).getIdentifier()); assertEquals("c",((LeafExpressionNode)nlResult.getChildExps().get(2)).getIdentifier()); NonLeafExpressionNode exp6=new NonLeafExpressionNode(Operator.AND,new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("a"),new LeafExpressionNode("b")),new LeafExpressionNode("c")); result=expander.expand(exp6); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.OR,nlResult.getOperator()); assertEquals(2,nlResult.getChildExps().size()); NonLeafExpressionNode temp=(NonLeafExpressionNode)nlResult.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(1); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); 
// exp7: (a & b) | c keeps the AND subtree as an OR child (already in DNF);
// exp8: ((a&b)|c) & d distributes d into both OR branches.
assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); NonLeafExpressionNode exp7=new NonLeafExpressionNode(Operator.OR,new NonLeafExpressionNode(Operator.AND,new LeafExpressionNode("a"),new LeafExpressionNode("b")),new LeafExpressionNode("c")); result=expander.expand(exp7); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.OR,nlResult.getOperator()); assertEquals(2,nlResult.getChildExps().size()); assertEquals("c",((LeafExpressionNode)nlResult.getChildExps().get(1)).getIdentifier()); nlResult=(NonLeafExpressionNode)nlResult.getChildExps().get(0); assertEquals(Operator.AND,nlResult.getOperator()); assertEquals(2,nlResult.getChildExps().size()); assertEquals("a",((LeafExpressionNode)nlResult.getChildExps().get(0)).getIdentifier()); assertEquals("b",((LeafExpressionNode)nlResult.getChildExps().get(1)).getIdentifier()); NonLeafExpressionNode exp8=new NonLeafExpressionNode(Operator.AND); exp8.addChildExp(new NonLeafExpressionNode(Operator.OR,new NonLeafExpressionNode(Operator.AND,new LeafExpressionNode("a"),new LeafExpressionNode("b")),new LeafExpressionNode("c"))); exp8.addChildExp(new LeafExpressionNode("d")); result=expander.expand(exp8); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.OR,nlResult.getOperator()); assertEquals(2,nlResult.getChildExps().size()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(1); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); 
// exp9/exp10: same-operator nested children are flattened into one four-child node;
// exp11: (a|b) & (c|d) expands to the four-term DNF cross product;
// exp12 (built across the next lines): (((a|b)|c)|d) & e distributes e over all four leaves.
temp=(NonLeafExpressionNode)temp.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); NonLeafExpressionNode exp9=new NonLeafExpressionNode(Operator.OR); exp9.addChildExp(new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("a"),new LeafExpressionNode("b"))); exp9.addChildExp(new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("c"),new LeafExpressionNode("d"))); result=expander.expand(exp9); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.OR,nlResult.getOperator()); assertEquals(4,nlResult.getChildExps().size()); assertEquals("a",((LeafExpressionNode)nlResult.getChildExps().get(0)).getIdentifier()); assertEquals("b",((LeafExpressionNode)nlResult.getChildExps().get(1)).getIdentifier()); assertEquals("c",((LeafExpressionNode)nlResult.getChildExps().get(2)).getIdentifier()); assertEquals("d",((LeafExpressionNode)nlResult.getChildExps().get(3)).getIdentifier()); NonLeafExpressionNode exp10=new NonLeafExpressionNode(Operator.AND); exp10.addChildExp(new NonLeafExpressionNode(Operator.AND,new LeafExpressionNode("a"),new LeafExpressionNode("b"))); exp10.addChildExp(new NonLeafExpressionNode(Operator.AND,new LeafExpressionNode("c"),new LeafExpressionNode("d"))); result=expander.expand(exp10); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.AND,nlResult.getOperator()); assertEquals(4,nlResult.getChildExps().size()); assertEquals("a",((LeafExpressionNode)nlResult.getChildExps().get(0)).getIdentifier()); assertEquals("b",((LeafExpressionNode)nlResult.getChildExps().get(1)).getIdentifier()); assertEquals("c",((LeafExpressionNode)nlResult.getChildExps().get(2)).getIdentifier()); 
assertEquals("d",((LeafExpressionNode)nlResult.getChildExps().get(3)).getIdentifier()); NonLeafExpressionNode exp11=new NonLeafExpressionNode(Operator.AND); exp11.addChildExp(new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("a"),new LeafExpressionNode("b"))); exp11.addChildExp(new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("c"),new LeafExpressionNode("d"))); result=expander.expand(exp11); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.OR,nlResult.getOperator()); assertEquals(4,nlResult.getChildExps().size()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(1); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(2); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(3); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); NonLeafExpressionNode exp12=new NonLeafExpressionNode(Operator.AND); NonLeafExpressionNode tempExp1=new 
NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("a"),new LeafExpressionNode("b")); NonLeafExpressionNode tempExp2=new NonLeafExpressionNode(Operator.OR,tempExp1,new LeafExpressionNode("c")); NonLeafExpressionNode tempExp3=new NonLeafExpressionNode(Operator.OR,tempExp2,new LeafExpressionNode("d")); exp12.addChildExp(tempExp3); exp12.addChildExp(new LeafExpressionNode("e")); result=expander.expand(exp12); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.OR,nlResult.getOperator()); assertEquals(4,nlResult.getChildExps().size()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("e",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(1); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("e",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(2); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("e",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(3); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("e",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); NonLeafExpressionNode exp13=new NonLeafExpressionNode(Operator.AND,new NonLeafExpressionNode(Operator.OR,new 
LeafExpressionNode("a"),new LeafExpressionNode("b"),new LeafExpressionNode("c")),new LeafExpressionNode("d")); result=expander.expand(exp13); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.OR,nlResult.getOperator()); assertEquals(3,nlResult.getChildExps().size()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(1); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(2); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); NonLeafExpressionNode exp15=new NonLeafExpressionNode(Operator.AND); NonLeafExpressionNode temp1=new NonLeafExpressionNode(Operator.AND); temp1.addChildExp(new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("a"),new LeafExpressionNode("b"))); temp1.addChildExp(new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("c"),new LeafExpressionNode("d"))); exp15.addChildExp(temp1); exp15.addChildExp(new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("e"),new LeafExpressionNode("f"))); result=expander.expand(exp15); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.OR,nlResult.getOperator()); 
// exp15: ((a|b)&(c|d))&(e|f) yields all 8 AND combinations; each of the next checks
// walks one OR child, which is an AND whose first child is a nested AND pair.
assertEquals(8,nlResult.getChildExps().size()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("e",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)temp.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(1); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("f",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)temp.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(2); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("e",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)temp.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(3); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("f",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)temp.getChildExps().get(0); 
assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(4); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("e",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)temp.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(5); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("f",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)temp.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("c",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(6); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("e",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)temp.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(7); 
// Last exp15 combination (b & d & f), then exp16: NOT(a | b) rewrites to
// NOT(a) & NOT(b) (De Morgan).
assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("f",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); temp=(NonLeafExpressionNode)temp.getChildExps().get(0); assertEquals(Operator.AND,temp.getOperator()); assertEquals(2,temp.getChildExps().size()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); assertEquals("d",((LeafExpressionNode)temp.getChildExps().get(1)).getIdentifier()); NonLeafExpressionNode exp16=new NonLeafExpressionNode(Operator.NOT,new NonLeafExpressionNode(Operator.OR,new LeafExpressionNode("a"),new LeafExpressionNode("b"))); result=expander.expand(exp16); assertTrue(result instanceof NonLeafExpressionNode); nlResult=(NonLeafExpressionNode)result; assertEquals(Operator.AND,nlResult.getOperator()); assertEquals(2,nlResult.getChildExps().size()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(0); assertEquals(Operator.NOT,temp.getOperator()); assertEquals("a",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); temp=(NonLeafExpressionNode)nlResult.getChildExps().get(1); assertEquals(Operator.NOT,temp.getOperator()); assertEquals("b",((LeafExpressionNode)temp.getChildExps().get(0)).getIdentifier()); }

Class: org.apache.hadoop.hbase.security.visibility.TestExpressionParser

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that the parser builds the expected expression trees for all well-formed
 * visibility expressions: single labels, left-associative chaining of {@code &} and
 * {@code |}, redundant parentheses, and the unary {@code !} operator in prefix and
 * parenthesized forms.
 */
@Test
public void testPositiveCases() throws Exception {
  // A bare identifier parses to a single leaf node.
  ExpressionNode node = parser.parse("abc");
  assertTrue(node instanceof LeafExpressionNode);
  assertEquals("abc", ((LeafExpressionNode) node).getIdentifier());

  // Operators are left-associative: a&b|c&d == ((a&b)|c)&d.
  NonLeafExpressionNode nlNode = assertNonLeaf(parser.parse("a&b|c&d"), Operator.AND, 2);
  assertLeaf(nlNode, 1, "d");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(0), Operator.OR, 2);
  assertLeaf(nlNode, 1, "c");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(0), Operator.AND, 2);
  assertLeaf(nlNode, 1, "b");
  assertLeaf(nlNode, 0, "a");

  // Parentheses around a single label collapse to a leaf.
  node = parser.parse("(a)");
  assertTrue(node instanceof LeafExpressionNode);
  assertEquals("a", ((LeafExpressionNode) node).getIdentifier());

  // Whitespace around tokens is ignored.
  nlNode = assertNonLeaf(parser.parse(" ( a & b )"), Operator.AND, 2);
  assertLeaf(nlNode, 0, "a");
  assertLeaf(nlNode, 1, "b");

  // Redundant nesting of parentheses is collapsed.
  nlNode = assertNonLeaf(parser.parse("((((a&b))))"), Operator.AND, 2);
  assertLeaf(nlNode, 0, "a");
  assertLeaf(nlNode, 1, "b");

  // Parenthesized groups on both sides of &.
  nlNode = assertNonLeaf(parser.parse("( a | b ) & (cc|def)"), Operator.AND, 2);
  NonLeafExpressionNode left = assertNonLeaf(nlNode.getChildExps().get(0), Operator.OR, 2);
  NonLeafExpressionNode right = assertNonLeaf(nlNode.getChildExps().get(1), Operator.OR, 2);
  assertLeaf(left, 0, "a");
  assertLeaf(left, 1, "b");
  assertLeaf(right, 0, "cc");
  assertLeaf(right, 1, "def");

  // Leaf on the left, group on the right.
  nlNode = assertNonLeaf(parser.parse("a&(cc|de)"), Operator.AND, 2);
  assertLeaf(nlNode, 0, "a");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(1), Operator.OR, 2);
  assertLeaf(nlNode, 0, "cc");
  assertLeaf(nlNode, 1, "de");

  // Group on the left, leaf on the right.
  nlNode = assertNonLeaf(parser.parse("(a&b)|c"), Operator.OR, 2);
  assertLeaf(nlNode, 1, "c");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(0), Operator.AND, 2);
  assertLeaf(nlNode, 0, "a");
  assertLeaf(nlNode, 1, "b");

  // Left-associativity inside a group: (a&b&c)|d == ((a&b)&c)|d.
  nlNode = assertNonLeaf(parser.parse("(a&b&c)|d"), Operator.OR, 2);
  assertLeaf(nlNode, 1, "d");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(0), Operator.AND, 2);
  assertLeaf(nlNode, 1, "c");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(0), Operator.AND, 2);
  assertLeaf(nlNode, 1, "b");
  assertLeaf(nlNode, 0, "a");

  // Explicit right-nesting via parentheses is preserved.
  nlNode = assertNonLeaf(parser.parse("a&(b|(c|d))"), Operator.AND, 2);
  assertLeaf(nlNode, 0, "a");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(1), Operator.OR, 2);
  assertLeaf(nlNode, 0, "b");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(1), Operator.OR, 2);
  assertLeaf(nlNode, 0, "c");
  assertLeaf(nlNode, 1, "d");

  // NOT of a single label.
  nlNode = assertNonLeaf(parser.parse("(!a)"), Operator.NOT, 1);
  assertLeaf(nlNode, 0, "a");

  // NOT in a parenthesized right operand.
  nlNode = assertNonLeaf(parser.parse("a&(!b)"), Operator.AND, 2);
  assertLeaf(nlNode, 0, "a");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(1), Operator.NOT, 1);
  assertLeaf(nlNode, 0, "b");

  // NOT binds tighter than &: !a&b == (!a)&b.
  nlNode = assertNonLeaf(parser.parse("!a&b"), Operator.AND, 2);
  assertLeaf(nlNode, 1, "b");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(0), Operator.NOT, 1);
  assertLeaf(nlNode, 0, "a");

  // NOT on both operands, with and without parentheses — same tree.
  for (String expr : new String[] { "!a&(!b)", "!a&!b" }) {
    nlNode = assertNonLeaf(parser.parse(expr), Operator.AND, 2);
    left = assertNonLeaf(nlNode.getChildExps().get(0), Operator.NOT, 1);
    right = assertNonLeaf(nlNode.getChildExps().get(1), Operator.NOT, 1);
    assertLeaf(left, 0, "a");
    assertLeaf(right, 0, "b");
  }

  // NOT applied to a whole group.
  nlNode = assertNonLeaf(parser.parse("!(a&b)"), Operator.NOT, 1);
  nlNode = assertNonLeaf(nlNode.getChildExps().get(0), Operator.AND, 2);
  assertLeaf(nlNode, 0, "a");
  assertLeaf(nlNode, 1, "b");

  // NOT on the right operand without parentheses.
  nlNode = assertNonLeaf(parser.parse("a&!b"), Operator.AND, 2);
  assertLeaf(nlNode, 0, "a");
  nlNode = assertNonLeaf(nlNode.getChildExps().get(1), Operator.NOT, 1);
  assertLeaf(nlNode, 0, "b");

  // A deeply nested mix of all three operators.
  nlNode = assertNonLeaf(parser.parse("!((a | b) & !(c & !b))"), Operator.NOT, 1);
  nlNode = assertNonLeaf(nlNode.getChildExps().get(0), Operator.AND, 2);
  left = assertNonLeaf(nlNode.getChildExps().get(0), Operator.OR, 2);
  assertLeaf(left, 0, "a");
  assertLeaf(left, 1, "b");
  right = assertNonLeaf(nlNode.getChildExps().get(1), Operator.NOT, 1);
  right = assertNonLeaf(right.getChildExps().get(0), Operator.AND, 2);
  assertLeaf(right, 0, "c");
  right = assertNonLeaf(right.getChildExps().get(1), Operator.NOT, 1);
  assertLeaf(right, 0, "b");
}

/**
 * Asserts that {@code node} is a {@link NonLeafExpressionNode} with the given operator
 * and child count, and returns it cast for further inspection.
 */
private static NonLeafExpressionNode assertNonLeaf(ExpressionNode node, Operator op,
    int childCount) {
  assertTrue(node instanceof NonLeafExpressionNode);
  NonLeafExpressionNode nlNode = (NonLeafExpressionNode) node;
  assertEquals(op, nlNode.getOperator());
  assertEquals(childCount, nlNode.getChildExps().size());
  return nlNode;
}

/** Asserts that child {@code idx} of {@code parent} is a leaf with identifier {@code id}. */
private static void assertLeaf(NonLeafExpressionNode parent, int idx, String id) {
  assertEquals(id, ((LeafExpressionNode) parent.getChildExps().get(idx)).getIdentifier());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies parsing of expressions whose labels contain special characters and therefore
 * must be quoted via {@code CellVisibility.quote}: bare special characters, two quoted
 * labels with no operator between them, and an unterminated quote must all raise
 * {@code ParseException}. (Failure-message typos "Excpetion"/"not operator" fixed.)
 */
@Test
public void testCasesSeperatedByDoubleQuotes() throws Exception {
  ExpressionNode node = null;
  // Special characters used bare (no quoting) must be rejected.
  try {
    node = parser.parse("\u0027&\"|\u002b&\u003f");
    fail("Exception must be thrown as there are special characters without quotes");
  } catch (ParseException e) {
    // expected
  }
  // Quoted special-character labels parse into the usual left-associative tree.
  node = parser.parse(CellVisibility.quote("\u0027") + "&" + CellVisibility.quote("\"") + "|"
      + CellVisibility.quote("\u002b" + "&" + "\u003f"));
  assertTrue(node instanceof NonLeafExpressionNode);
  NonLeafExpressionNode nlNode = (NonLeafExpressionNode) node;
  assertEquals(Operator.OR, nlNode.getOperator());
  assertEquals(2, nlNode.getChildExps().size());
  assertEquals("\u002b" + "&" + "\u003f",
      ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier());
  assertTrue(nlNode.getChildExps().get(0) instanceof NonLeafExpressionNode);
  nlNode = (NonLeafExpressionNode) nlNode.getChildExps().get(0);
  assertEquals(Operator.AND, nlNode.getOperator());
  assertEquals(2, nlNode.getChildExps().size());
  assertEquals("\"", ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier());
  assertEquals("\u0027", ((LeafExpressionNode) nlNode.getChildExps().get(0)).getIdentifier());
  // Two adjacent quoted labels with no operator between them must be rejected.
  try {
    node = parser.parse(CellVisibility.quote("\u0027&\\") + "|"
        + CellVisibility.quote("\u002b" + "&" + "\\") + CellVisibility.quote("$$\""));
    fail("Exception must be thrown as there is no operator");
  } catch (ParseException e) {
    // expected
  }
  // With operators in place the same quoted labels parse fine.
  node = parser.parse(CellVisibility.quote("\u0027" + "&" + "\\") + "|"
      + CellVisibility.quote("\u003f" + "&" + "\\") + "&" + CellVisibility.quote("$$\""));
  assertTrue(node instanceof NonLeafExpressionNode);
  nlNode = (NonLeafExpressionNode) node;
  assertEquals(Operator.AND, nlNode.getOperator());
  assertEquals(2, nlNode.getChildExps().size());
  assertEquals("$$\"", ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier());
  assertTrue(nlNode.getChildExps().get(0) instanceof NonLeafExpressionNode);
  nlNode = (NonLeafExpressionNode) nlNode.getChildExps().get(0);
  assertEquals(Operator.OR, nlNode.getOperator());
  assertEquals(2, nlNode.getChildExps().size());
  assertEquals("\u0027" + "&" + "\\",
      ((LeafExpressionNode) nlNode.getChildExps().get(0)).getIdentifier());
  assertEquals("\u003f" + "&" + "\\",
      ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier());
  // An unterminated quote must be rejected.
  try {
    node = parser.parse(CellVisibility.quote("\u002b&\\") + "|"
        + CellVisibility.quote("\u0027&\\") + "&" + "\"$$");
    fail("Exception must be thrown as there is no end quote");
  } catch (ParseException e) {
    // expected
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that quoted special-character labels parse correctly and that the parser
 * produces the same tree when the identical expression is parsed a second time
 * (i.e. parsing is repeatable / the parser carries no state between calls).
 */
@Test
public void testNonAsciiCases() throws Exception {
  // (' & +) | (- & ?) with every label quoted; & is left-associative, so the
  // expected tree is ((' & +) | -) & ?.
  String expr = CellVisibility.quote("\u0027") + "&" + CellVisibility.quote("\u002b") + "|"
      + CellVisibility.quote("\u002d") + "&" + CellVisibility.quote("\u003f");
  // The original test verified the identical tree twice inline; parse twice and
  // run the same verification on each result.
  verifySpecialCharTree(parser.parse(expr));
  verifySpecialCharTree(parser.parse(expr));
}

/** Asserts the tree shape ((' & +) | -) & ? for the quoted special-character expression. */
private static void verifySpecialCharTree(ExpressionNode node) {
  assertTrue(node instanceof NonLeafExpressionNode);
  NonLeafExpressionNode nlNode = (NonLeafExpressionNode) node;
  assertEquals(Operator.AND, nlNode.getOperator());
  assertEquals(2, nlNode.getChildExps().size());
  assertEquals("\u003f", ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier());
  assertTrue(nlNode.getChildExps().get(0) instanceof NonLeafExpressionNode);
  nlNode = (NonLeafExpressionNode) nlNode.getChildExps().get(0);
  assertEquals(Operator.OR, nlNode.getOperator());
  assertEquals(2, nlNode.getChildExps().size());
  assertEquals("\u002d", ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier());
  assertTrue(nlNode.getChildExps().get(0) instanceof NonLeafExpressionNode);
  nlNode = (NonLeafExpressionNode) nlNode.getChildExps().get(0);
  assertEquals(Operator.AND, nlNode.getOperator());
  assertEquals(2, nlNode.getChildExps().size());
  assertEquals("\u002b", ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier());
  assertEquals("\u0027", ((LeafExpressionNode) nlNode.getChildExps().get(0)).getIdentifier());
}

Class: org.apache.hadoop.hbase.security.visibility.TestVisibilityLabels

InternalCallVerifier BooleanVerifier 
/**
 * Appends against a cell whose visibility does not match the reader's authorizations
 * must leave the row invisible; only an Append that itself carries a matching
 * CellVisibility makes the row readable.
 */
@Test
public void testLabelsWithAppend() throws Throwable {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = TEST_UTIL.createTable(tableName, fam)) {
    final byte[] rowKey = Bytes.toBytes("row1");
    // Seed the row with a cell requiring both SECRET and CONFIDENTIAL.
    Put seed = new Put(rowKey);
    seed.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes("a"));
    seed.setCellVisibility(new CellVisibility(SECRET + " & " + CONFIDENTIAL));
    table.put(seed);
    // A reader holding only SECRET must not see it.
    Get get = new Get(rowKey);
    get.setAuthorizations(new Authorizations(SECRET));
    assertTrue(table.get(get).isEmpty());
    // An append carrying no visibility expression keeps the row hidden.
    Append blindAppend = new Append(rowKey);
    blindAppend.add(fam, qual, Bytes.toBytes("b"));
    table.append(blindAppend);
    assertTrue(table.get(get).isEmpty());
    // An append tagged SECRET makes the cell visible to this reader.
    Append taggedAppend = new Append(rowKey);
    taggedAppend.add(fam, qual, Bytes.toBytes("c"));
    taggedAppend.setCellVisibility(new CellVisibility(SECRET));
    table.append(taggedAppend);
    assertTrue(!table.get(get).isEmpty());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A scan whose Authorizations contain only a label that was never defined in the
 * system must return no rows. Fix: the ResultScanner was never closed — wrap it in
 * try-with-resources so the server-side scanner is released.
 */
@Test
public void testVisibilityLabelsInScanThatDoesNotMatchAnyDefinedLabels() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) {
    Scan s = new Scan();
    // "SAMPLE" is not one of the defined visibility labels.
    s.setAuthorizations(new Authorizations("SAMPLE"));
    try (ResultScanner scanner = table.getScanner(s)) {
      Result[] next = scanner.next(3);
      assertTrue(next.length == 0);
    }
  }
}

InternalCallVerifier BooleanVerifier 
/**
 * Increments must honor cell visibility: a plain incrementColumnValue (no visibility
 * expression) leaves the cell hidden from a SECRET-only reader, while an Increment
 * tagged with SECRET exposes it.
 */
@Test
public void testLabelsWithIncrement() throws Throwable {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = TEST_UTIL.createTable(tableName, fam)) {
    final byte[] rowKey = Bytes.toBytes("row1");
    // Seed with a counter cell requiring both SECRET and CONFIDENTIAL.
    Put seed = new Put(rowKey);
    seed.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(1L));
    seed.setCellVisibility(new CellVisibility(SECRET + " & " + CONFIDENTIAL));
    table.put(seed);
    // A reader holding only SECRET must not see it.
    Get get = new Get(rowKey);
    get.setAuthorizations(new Authorizations(SECRET));
    assertTrue(table.get(get).isEmpty());
    // incrementColumnValue carries no visibility expression; still hidden.
    table.incrementColumnValue(rowKey, fam, qual, 2L);
    assertTrue(table.get(get).isEmpty());
    // An Increment tagged SECRET makes the cell readable.
    Increment increment = new Increment(rowKey);
    increment.addColumn(fam, qual, 2L);
    increment.setCellVisibility(new CellVisibility(SECRET));
    table.increment(increment);
    assertTrue(!table.get(get).isEmpty());
  }
}

InternalCallVerifier BooleanVerifier 
/**
 * The cluster must advertise the CELL_VISIBILITY security capability when the
 * visibility coprocessors are loaded. Fix: use the typed List instead of a raw type.
 */
@Test
public void testSecurityCapabilities() throws Exception {
  List<SecurityCapability> capabilities =
      TEST_UTIL.getConnection().getAdmin().getSecurityCapabilities();
  assertTrue("CELL_VISIBILITY capability is missing",
      capabilities.contains(SecurityCapability.CELL_VISIBILITY));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Kills the region server that hosts the visibility labels table while a concurrent
 * thread creates a labelled table, then verifies that once the labels region is
 * reassigned, visibility-filtered scans still work.
 * NOTE(review): coordination between the three threads relies on the {@code killedRS}
 * flag (a field of this test class) and polling sleeps; statement order is significant.
 */
@Test public void testVisibilityLabelsOnKillingOfRSContainingLabelsTable() throws Exception {
  List regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
  // Count region servers that are still alive.
  int liveRS = 0;
  for ( RegionServerThread rsThreads : regionServerThreads) {
    if (!rsThreads.getRegionServer().isAborted()) {
      liveRS++;
    }
  }
  // Ensure at least two live servers so the labels region can move after the kill.
  if (liveRS == 1) {
    TEST_UTIL.getHBaseCluster().startRegionServer();
  }
  // Thread 1: abort whichever region server currently hosts the labels table,
  // then signal via the killedRS flag.
  Thread t1 = new Thread(){
    public void run(){
      List regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
      for ( RegionServerThread rsThread : regionServerThreads) {
        List onlineRegions = rsThread.getRegionServer().getOnlineRegions(LABELS_TABLE_NAME);
        if (onlineRegions.size() > 0) {
          rsThread.getRegionServer().abort("Aborting ");
          killedRS = true;
          break;
        }
      }
    }
  } ;
  t1.start();
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  // Thread 2: once the kill has happened, create a table with labelled rows.
  // Exceptions are deliberately swallowed; the final scan below is the real check.
  Thread t = new Thread(){
    public void run(){
      try {
        while (!killedRS) {
          Thread.sleep(1);
        }
        createTableAndWriteDataWithLabels(tableName,"(" + SECRET + "|"+ CONFIDENTIAL+ ")",PRIVATE);
      }
 catch (      Exception e) {
      }
    }
  } ;
  t.start();
  regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
  // Wait until the hosting server has actually been aborted.
  while (!killedRS) {
    Thread.sleep(10);
  }
  // Poll until some live server has the labels region online again.
  regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
  for ( RegionServerThread rsThread : regionServerThreads) {
    while (true) {
      if (!rsThread.getRegionServer().isAborted()) {
        List onlineRegions = rsThread.getRegionServer().getOnlineRegions(LABELS_TABLE_NAME);
        if (onlineRegions.size() > 0) {
          break;
        }
 else {
          Thread.sleep(10);
        }
      }
 else {
        break;
      }
    }
  }
  TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(),50000);
  t.join();
  // With SECRET authorization only the (SECRET|CONFIDENTIAL) row is visible.
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)){
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations(SECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 1);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Sets three auth labels for a user, then clears a partially-overlapping set and
 * verifies the per-label results: clearing a label the user holds succeeds (empty
 * exception value), clearing one the user never had (PUBLIC) fails with
 * InvalidLabelException, and only PRIVATE remains afterwards. Runs as superuser.
 */
@Test public void testClearUserAuths() throws Throwable {
  PrivilegedExceptionAction action = new PrivilegedExceptionAction(){
    public Void run() throws Exception {
      // Grant SECRET, CONFIDENTIAL and PRIVATE to the user.
      String[] auths = {SECRET,CONFIDENTIAL,PRIVATE};
      String user = "testUser";
      try (Connection conn = ConnectionFactory.createConnection(conf)){
        VisibilityClient.setAuths(conn,auths,user);
      }
 catch (      Throwable e) {
        fail("Should not have failed");
      }
      // Clear SECRET, PUBLIC and CONFIDENTIAL; PUBLIC was never granted.
      auths = new String[]{SECRET,PUBLIC,CONFIDENTIAL};
      VisibilityLabelsResponse response = null;
      try (Connection conn = ConnectionFactory.createConnection(conf)){
        response = VisibilityClient.clearAuths(conn,auths,user);
      }
 catch (      Throwable e) {
        fail("Should not have failed");
      }
      // One result per requested label; an empty exception value means success.
      List resultList = response.getResultList();
      assertEquals(3,resultList.size());
      assertTrue(resultList.get(0).getException().getValue().isEmpty());
      // The PUBLIC entry (index 1) must carry an InvalidLabelException.
      assertEquals("org.apache.hadoop.hbase.DoNotRetryIOException",resultList.get(1).getException().getName());
      assertTrue(Bytes.toString(resultList.get(1).getException().getValue().toByteArray()).contains("org.apache.hadoop.hbase.security.visibility.InvalidLabelException: " + "Label 'public' is not set for the user testUser"));
      assertTrue(resultList.get(2).getException().getValue().isEmpty());
      // Scan the labels table directly: only PRIVATE should remain for the user.
      try (Connection connection = ConnectionFactory.createConnection(conf);Table ht = connection.getTable(LABELS_TABLE_NAME)){
        ResultScanner scanner = ht.getScanner(new Scan());
        Result result = null;
        List results = new ArrayList();
        while ((result = scanner.next()) != null) {
          results.add(result);
        }
        List curAuths = extractAuths(user,results);
        assertTrue(curAuths.contains(PRIVATE));
        assertEquals(1,curAuths.size());
      }
      // Cross-check through the client API as well.
      GetAuthsResponse authsResponse = null;
      try (Connection conn = ConnectionFactory.createConnection(conf)){
        authsResponse = VisibilityClient.getAuths(conn,user);
      }
 catch (      Throwable e) {
        fail("Should not have failed");
      }
      List authsList = new ArrayList();
      for (      ByteString authBS : authsResponse.getAuthList()) {
        authsList.add(Bytes.toString(authBS.toByteArray()));
      }
      assertEquals(1,authsList.size());
      assertTrue(authsList.contains(PRIVATE));
      return null;
    }
  }
 ;
  SUPERUSER.runAs(action);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Labels containing unicode/special characters must round-trip through quoting:
 * all three labelled rows are returned when the scan carries the matching
 * authorizations. Fixes: the triplicated per-row cell check is collapsed into a
 * loop, and the ResultScanner is closed via try-with-resources.
 */
@Test
public void testAuthorizationsWithSpecialUnicodeCharacters() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      CellVisibility.quote(UC1) + "|" + CellVisibility.quote(UC2),
      CellVisibility.quote(UC1),
      CellVisibility.quote(UNICODE_VIS_TAG))) {
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations(UC1, UC2, ACCENT, UNICODE_VIS_TAG));
    try (ResultScanner scanner = table.getScanner(s)) {
      Result[] next = scanner.next(3);
      assertTrue(next.length == 3);
      // All three rows must come back, in row order.
      byte[][] expectedRows = { row1, row2, row3 };
      for (int i = 0; i < expectedRows.length; i++) {
        CellScanner cellScanner = next[i].cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
            current.getRowLength(), expectedRows[i], 0, expectedRows[i].length));
      }
    }
  }
}

InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * checkAndPut carrying a CellVisibility must behave like a plain put: the row tagged
 * SECRET & CONFIDENTIAL stays hidden from a SECRET-only scan, while the row tagged
 * SECRET alone is the only one returned.
 */
@Test
public void testLabelsWithCheckAndPut() throws Throwable {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = TEST_UTIL.createTable(tableName, fam)) {
    // First row: requires both SECRET and CONFIDENTIAL.
    byte[] firstRow = Bytes.toBytes("row1");
    Put hiddenPut = new Put(firstRow);
    hiddenPut.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
    hiddenPut.setCellVisibility(new CellVisibility(SECRET + " & " + CONFIDENTIAL));
    table.checkAndPut(firstRow, fam, qual, null, hiddenPut);
    // Second row: requires SECRET only.
    byte[] secondRow = Bytes.toBytes("row2");
    Put visiblePut = new Put(secondRow);
    visiblePut.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
    visiblePut.setCellVisibility(new CellVisibility(SECRET));
    table.checkAndPut(secondRow, fam, qual, null, visiblePut);
    // Scan with SECRET only: exactly the second row must be visible.
    Scan scan = new Scan();
    scan.setAuthorizations(new Authorizations(SECRET));
    ResultScanner scanner = table.getScanner(scan);
    Result first = scanner.next();
    assertTrue(!first.isEmpty());
    assertTrue(Bytes.equals(secondRow, first.getRow()));
    assertNull(scanner.next());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * RowMutations carrying per-Put visibility expressions must keep each column gated
 * by its own label: qual is readable with CONFIDENTIAL only, qual2 with SECRET only.
 */
@Test
public void testMutateRow() throws Exception {
  final byte[] qual2 = Bytes.toBytes("qual2");
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(fam));
  TEST_UTIL.getHBaseAdmin().createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    // Two puts on the same row, each with its own visibility expression.
    Put confidentialPut = new Put(row1);
    confidentialPut.addColumn(fam, qual, value);
    confidentialPut.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    Put secretPut = new Put(row1);
    secretPut.addColumn(fam, qual2, value);
    secretPut.setCellVisibility(new CellVisibility(SECRET));
    RowMutations mutations = new RowMutations(row1);
    mutations.add(confidentialPut);
    mutations.add(secretPut);
    table.mutateRow(mutations);
    // CONFIDENTIAL sees only qual.
    Get get = new Get(row1);
    get.setAuthorizations(new Authorizations(CONFIDENTIAL));
    Result result = table.get(get);
    assertTrue(result.containsColumn(fam, qual));
    assertFalse(result.containsColumn(fam, qual2));
    // SECRET sees only qual2.
    get.setAuthorizations(new Authorizations(SECRET));
    result = table.get(get);
    assertFalse(result.containsColumn(fam, qual));
    assertTrue(result.containsColumn(fam, qual2));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A Get whose authorizations satisfy a cell's visibility expression (including a
 * negated label the reader does NOT hold) must return the cell with its value intact.
 */
@Test
public void testVisibilityLabelsWithGet() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE,
      SECRET + "&" + CONFIDENTIAL + "&" + PRIVATE)) {
    // Reader holds SECRET and CONFIDENTIAL but not PRIVATE.
    Get get = new Get(row1);
    get.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
    Result result = table.get(get);
    assertTrue(!result.isEmpty());
    Cell latest = result.getColumnLatestCell(fam, qual);
    assertTrue(Bytes.equals(value, 0, value.length,
        latest.getValueArray(), latest.getValueOffset(), latest.getValueLength()));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Rows whose visibility expressions are satisfied by the scan's authorizations are
 * all returned, in row order.
 */
@Test
public void testSimpleVisibilityLabels() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      SECRET + "|" + CONFIDENTIAL, PRIVATE + "|" + CONFIDENTIAL)) {
    Scan scan = new Scan();
    scan.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE));
    ResultScanner scanner = table.getScanner(scan);
    Result[] results = scanner.next(3);
    assertTrue(results.length == 2);
    // Both rows must come back, in row order.
    byte[][] expectedRows = { row1, row2 };
    for (int i = 0; i < expectedRows.length; i++) {
      CellScanner cellScanner = results[i].cellScanner();
      cellScanner.advance();
      Cell current = cellScanner.current();
      assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
          current.getRowLength(), expectedRows[i], 0, expectedRows[i].length));
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * After a flush, the store files of a table holding visibility-labelled cells must
 * be written with tags included in their HFile context.
 */
@Test
public void testFlushedFileWithVisibilityTags() throws Exception {
  final byte[] qual2 = Bytes.toBytes("qual2");
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(fam));
  TEST_UTIL.getHBaseAdmin().createTable(desc);
  // Write two cells with different visibility expressions via mutateRow.
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put confidentialPut = new Put(row1);
    confidentialPut.addColumn(fam, qual, value);
    confidentialPut.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    Put secretPut = new Put(row1);
    secretPut.addColumn(fam, qual2, value);
    secretPut.setCellVisibility(new CellVisibility(SECRET));
    RowMutations mutations = new RowMutations(row1);
    mutations.add(confidentialPut);
    mutations.add(secretPut);
    table.mutateRow(mutations);
  }
  TEST_UTIL.getHBaseAdmin().flush(tableName);
  // Every flushed store file must report that it includes tags.
  List regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
  Store store = regions.get(0).getStore(fam);
  Collection storefiles = store.getStorefiles();
  assertTrue(storefiles.size() > 0);
  for (StoreFile storeFile : storefiles) {
    assertTrue(storeFile.getReader().getHFileReader().getFileContext().isIncludesTags());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * After aborting every region server and starting a fresh one, the labels region
 * must come back online and visibility-filtered scans must keep working.
 */
@Test(timeout = 60 * 1000)
public void testVisibilityLabelsOnRSRestart() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  // Abort all currently running region servers.
  List regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
  for (RegionServerThread rsThread : regionServerThreads) {
    rsThread.getRegionServer().abort("Aborting ");
  }
  // Bring up a replacement and wait until it serves the labels region.
  RegionServerThread freshServer = TEST_UTIL.getHBaseCluster().startRegionServer();
  waitForLabelsRegionAvailability(freshServer.getRegionServer());
  // With SECRET authorization only the (SECRET|CONFIDENTIAL) row is visible.
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) {
    Scan scan = new Scan();
    scan.setAuthorizations(new Authorizations(SECRET));
    ResultScanner scanner = table.getScanner(scan);
    Result[] results = scanner.next(3);
    assertTrue(results.length == 1);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Verifies version-aware visibility filtering: row1 and row2 each receive two
// versions (ts=3 tagged SECRET, ts=4 tagged PRIVATE for row1; both versions
// SECRET for row2) across family 'fam' (default versions) and 'fam2'
// (maxVersions=5). Scanning with maxVersions=1 under SECRET-only auths, the
// latest row1 version (PRIVATE) is hidden: 'fam2' falls back to the older
// SECRET cell (value v1), while 'fam' exposes nothing — presumably because
// 'fam' retains only a single version so the older cell is gone; TODO confirm
// against this HBase version's default HColumnDescriptor maxVersions. For
// row2 both versions are SECRET, so the latest (v2) is returned everywhere.
@Test public void testMultipleVersions() throws Exception { final byte[] r1=Bytes.toBytes("row1"); final byte[] r2=Bytes.toBytes("row2"); final byte[] v1=Bytes.toBytes("100"); final byte[] v2=Bytes.toBytes("101"); final byte[] fam2=Bytes.toBytes("info2"); final byte[] qual2=Bytes.toBytes("qual2"); TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); HTableDescriptor desc=new HTableDescriptor(tableName); HColumnDescriptor col=new HColumnDescriptor(fam); desc.addFamily(col); col=new HColumnDescriptor(fam2); col.setMaxVersions(5); desc.addFamily(col); TEST_UTIL.getHBaseAdmin().createTable(desc); try (Table table=TEST_UTIL.getConnection().getTable(tableName)){ Put put=new Put(r1); put.addColumn(fam,qual,3l,v1); put.addColumn(fam,qual2,3l,v1); put.addColumn(fam2,qual,3l,v1); put.addColumn(fam2,qual2,3l,v1); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); put=new Put(r1); put.addColumn(fam,qual,4l,v2); put.addColumn(fam,qual2,4l,v2); put.addColumn(fam2,qual,4l,v2); put.addColumn(fam2,qual2,4l,v2); put.setCellVisibility(new CellVisibility(PRIVATE)); table.put(put); put=new Put(r2); put.addColumn(fam,qual,3l,v1); put.addColumn(fam,qual2,3l,v1); put.addColumn(fam2,qual,3l,v1); put.addColumn(fam2,qual2,3l,v1); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); put=new Put(r2); put.addColumn(fam,qual,4l,v2); put.addColumn(fam,qual2,4l,v2); put.addColumn(fam2,qual,4l,v2); put.addColumn(fam2,qual2,4l,v2); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); Scan s=new Scan(); s.setMaxVersions(1); s.setAuthorizations(new Authorizations(SECRET)); ResultScanner scanner=table.getScanner(s); Result result=scanner.next(); assertTrue(Bytes.equals(r1,result.getRow())); assertNull(result.getColumnLatestCell(fam,qual)); assertNull(result.getColumnLatestCell(fam,qual2)); Cell cell=result.getColumnLatestCell(fam2,qual); assertNotNull(cell); 
assertTrue(Bytes.equals(v1,0,v1.length,cell.getValueArray(),cell.getValueOffset(),cell.getValueLength())); cell=result.getColumnLatestCell(fam2,qual2); assertNotNull(cell); assertTrue(Bytes.equals(v1,0,v1.length,cell.getValueArray(),cell.getValueOffset(),cell.getValueLength())); result=scanner.next(); assertTrue(Bytes.equals(r2,result.getRow())); cell=result.getColumnLatestCell(fam,qual); assertNotNull(cell); assertTrue(Bytes.equals(v2,0,v2.length,cell.getValueArray(),cell.getValueOffset(),cell.getValueLength())); cell=result.getColumnLatestCell(fam,qual2); assertNotNull(cell); assertTrue(Bytes.equals(v2,0,v2.length,cell.getValueArray(),cell.getValueOffset(),cell.getValueLength())); cell=result.getColumnLatestCell(fam2,qual); assertNotNull(cell); assertTrue(Bytes.equals(v2,0,v2.length,cell.getValueArray(),cell.getValueOffset(),cell.getValueLength())); cell=result.getColumnLatestCell(fam2,qual2); assertNotNull(cell); assertTrue(Bytes.equals(v2,0,v2.length,cell.getValueArray(),cell.getValueOffset(),cell.getValueLength())); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test
public void testSimpleVisibilityLabelsWithUniCodeCharacters() throws Exception {
  // Labels containing unicode / special characters must be quoted with
  // CellVisibility.quote() when embedded in an expression; with the full
  // set of auths all three rows should be visible.
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      SECRET + "|" + CellVisibility.quote(COPYRIGHT),
      "(" + CellVisibility.quote(COPYRIGHT) + "&" + CellVisibility.quote(ACCENT) + ")|"
          + CONFIDENTIAL,
      CellVisibility.quote(UNICODE_VIS_TAG) + "&" + SECRET)) {
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE, COPYRIGHT, ACCENT,
        UNICODE_VIS_TAG));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertEquals(3, next.length);
    // Each result's first cell must belong to the expected row, in order.
    byte[][] expectedRows = { row1, row2, row3 };
    for (int i = 0; i < expectedRows.length; i++) {
      CellScanner cellScanner = next[i].cellScanner();
      cellScanner.advance();
      Cell current = cellScanner.current();
      assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
          current.getRowLength(), expectedRows[i], 0, expectedRows[i].length));
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testVisibilityLabelsWithComplexLabels() throws Exception {
  // Rows carry compound visibility expressions; scanning with the full set
  // of auths surfaces rows 2-4 while row 1 stays hidden by its !TOPSECRET
  // requirement combined with the scan's TOPSECRET authorization.
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  String tripleAnd = "(" + PRIVATE + "&" + CONFIDENTIAL + "&" + SECRET + ")";
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      "(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET,
      tripleAnd, tripleAnd, tripleAnd)) {
    Scan scan = new Scan();
    scan.setAuthorizations(new Authorizations(TOPSECRET, CONFIDENTIAL, PRIVATE, PUBLIC, SECRET));
    ResultScanner scanner = table.getScanner(scan);
    Result[] results = scanner.next(4);
    assertEquals(3, results.length);
    byte[][] wantRows = { row2, row3, row4 };
    for (int i = 0; i < wantRows.length; i++) {
      CellScanner cells = results[i].cellScanner();
      cells.advance();
      Cell cell = cells.current();
      assertTrue(Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
          wantRows[i], 0, wantRows[i].length));
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test
public void testVisibilityLabelsInGetThatDoesNotMatchAnyDefinedLabels() throws Exception {
  // A Get carrying an authorization that was never defined as a label must
  // simply return an empty result rather than fail.
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) {
    Get request = new Get(row1);
    request.setAuthorizations(new Authorizations("SAMPLE"));
    Result outcome = table.get(request);
    assertTrue(outcome.isEmpty());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test
public void testVisibilityLabelsThatDoesNotPassTheCriteria() throws Exception {
  // Rows are labeled (SECRET|CONFIDENTIAL) and PRIVATE; a scan authorized
  // only for PUBLIC matches neither expression, so nothing is returned.
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) {
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations(PUBLIC));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    // assertEquals gives a clearer failure message than assertTrue(len == 0).
    assertEquals(0, next.length);
  }
}

Class: org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsOpWithDifferentUsersNoACL

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testLabelsTableOpsWithDifferentUsers() throws Throwable {
  // Label-table operations without ACL: only the superuser can set/clear
  // auths; other users get AccessDeniedException, surfaced per-result in
  // the response rather than thrown.
  PrivilegedExceptionAction<VisibilityLabelsResponse> action =
      new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
        public VisibilityLabelsResponse run() throws Exception {
          try (Connection conn = ConnectionFactory.createConnection(conf)) {
            return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE },
                "user1");
          } catch (Throwable e) {
            // Intentionally swallowed: a failure yields a null response,
            // which the assertions below would surface as an NPE.
          }
          return null;
        }
      };
  VisibilityLabelsResponse response = SUPERUSER.runAs(action);
  assertTrue(response.getResult(0).getException().getValue().isEmpty());
  assertTrue(response.getResult(1).getException().getValue().isEmpty());
  // A normal user must not be able to set auths for another user.
  action = new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
    public VisibilityLabelsResponse run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE }, "user3");
      } catch (Throwable e) {
        // Intentionally swallowed; see above.
      }
      return null;
    }
  };
  response = NORMAL_USER1.runAs(action);
  assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException",
      response.getResult(0).getException().getName());
  assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException",
      response.getResult(1).getException().getName());
  PrivilegedExceptionAction<GetAuthsResponse> action1 =
      new PrivilegedExceptionAction<GetAuthsResponse>() {
        public GetAuthsResponse run() throws Exception {
          try (Connection conn = ConnectionFactory.createConnection(conf)) {
            return VisibilityClient.getAuths(conn, "user1");
          } catch (Throwable e) {
            // Intentionally swallowed; see above.
          }
          return null;
        }
      };
  // Normal users see an empty auth list for user1; only the superuser
  // sees the two labels that were actually set.
  GetAuthsResponse authsResponse = NORMAL_USER.runAs(action1);
  assertTrue(authsResponse.getAuthList().isEmpty());
  authsResponse = NORMAL_USER1.runAs(action1);
  assertTrue(authsResponse.getAuthList().isEmpty());
  authsResponse = SUPERUSER.runAs(action1);
  List<String> authsList = new ArrayList<String>();
  for (ByteString authBS : authsResponse.getAuthList()) {
    authsList.add(Bytes.toString(authBS.toByteArray()));
  }
  assertEquals(2, authsList.size());
  assertTrue(authsList.contains(CONFIDENTIAL));
  assertTrue(authsList.contains(PRIVATE));
  // Clearing auths is likewise superuser-only.
  PrivilegedExceptionAction<VisibilityLabelsResponse> action2 =
      new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
        public VisibilityLabelsResponse run() throws Exception {
          try (Connection conn = ConnectionFactory.createConnection(conf)) {
            return VisibilityClient.clearAuths(conn, new String[] { CONFIDENTIAL, PRIVATE },
                "user1");
          } catch (Throwable e) {
            // Intentionally swallowed; see above.
          }
          return null;
        }
      };
  response = NORMAL_USER1.runAs(action2);
  assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException",
      response.getResult(0).getException().getName());
  assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException",
      response.getResult(1).getException().getName());
  response = SUPERUSER.runAs(action2);
  assertTrue(response.getResult(0).getException().getValue().isEmpty());
  assertTrue(response.getResult(1).getException().getValue().isEmpty());
  // After the superuser clears them, user1 has no auths left.
  authsResponse = SUPERUSER.runAs(action1);
  assertTrue(authsResponse.getAuthList().isEmpty());
}

Class: org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsReplication

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Replication of visibility labels: writes four rows with compound visibility
// expressions on the source cluster and verifies a labeled scan there, then
// polls the peer cluster (TEST_UTIL1) — retrying up to 10 more times with a
// 2-second sleep between scans — until the rows have replicated. Finally it
// checks, via verifyGet, both the replicated cell values and the visibility
// expressions observed on the sink side (TestCoprocessorForTagsAtSink.tags is
// cleared between checks so each verifyGet sees only its own tags).
@Test public void testVisibilityReplication() throws Exception { int retry=0; try (Table table=writeData(TABLE_NAME,"(" + SECRET + "&"+ PUBLIC+ ")"+ "|("+ CONFIDENTIAL+ ")&("+ TOPSECRET+ ")","(" + PRIVATE + "|"+ CONFIDENTIAL+ ")&("+ PUBLIC+ "|"+ TOPSECRET+ ")","(" + SECRET + "|"+ CONFIDENTIAL+ ")"+ "&"+ "!"+ TOPSECRET,CellVisibility.quote(UNICODE_VIS_TAG) + "&" + SECRET)){ Scan s=new Scan(); s.setAuthorizations(new Authorizations(SECRET,CONFIDENTIAL,PRIVATE,TOPSECRET,UNICODE_VIS_TAG)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(4); assertTrue(next.length == 4); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); cellScanner=next[2].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row3,0,row3.length)); cellScanner=next[3].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row4,0,row4.length)); try (Table table2=TEST_UTIL1.getConnection().getTable(TABLE_NAME)){ s=new Scan(); scanner=table2.getScanner(s); next=scanner.next(4); while (next.length == 0 && retry <= 10) { scanner=table2.getScanner(s); next=scanner.next(4); Thread.sleep(2000); retry++; } assertTrue(next.length == 4); verifyGet(row1,expectedVisString[0],expected[0],false,TOPSECRET,CONFIDENTIAL); TestCoprocessorForTagsAtSink.tags.clear(); verifyGet(row2,expectedVisString[1],expected[1],false,CONFIDENTIAL,PUBLIC); TestCoprocessorForTagsAtSink.tags.clear(); 
verifyGet(row3,expectedVisString[2],expected[2],false,PRIVATE,SECRET); verifyGet(row3,"",expected[3],true,TOPSECRET,SECRET); verifyGet(row4,expectedVisString[3],expected[4],false,UNICODE_VIS_TAG,SECRET); } } }

Class: org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithACL

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
@Test
public void testScanForUserWithFewerLabelAuthsThanLabelsInScanAuthorizations() throws Throwable {
  // user2 is granted only SECRET. Scanning with {SECRET, CONFIDENTIAL} must
  // effectively drop the unauthorized CONFIDENTIAL auth: only row2
  // (SECRET&!PRIVATE) is visible, since row1 additionally requires
  // CONFIDENTIAL.
  String[] auths = { SECRET };
  String user = "user2";
  VisibilityClient.setAuths(TEST_UTIL.getConnection(), auths, user);
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  final Table table = createTableAndWriteDataWithLabels(tableName,
      SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" + PRIVATE);
  SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName, null, null,
      Permission.Action.READ);
  PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      Scan s = new Scan();
      s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table t = connection.getTable(table.getName())) {
        ResultScanner scanner = t.getScanner(s);
        Result result = scanner.next();
        assertFalse(result.isEmpty());
        assertTrue(Bytes.equals(Bytes.toBytes("row2"), result.getRow()));
        // No further rows should be visible.
        result = scanner.next();
        assertNull(result);
      }
      return null;
    }
  };
  NORMAL_USER2.runAs(scanAction);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// End-to-end auth administration with ACL enabled: addLabels, setAuths and
// clearAuths attempted by a normal user each surface AccessDeniedException
// per result, while the superuser and the admin-owned TEST_UTIL connection
// succeed. getAuths for another user returns null to a normal user — the
// client-side Throwable is swallowed inside the action, so runAs yields the
// action's fallback null — but returns the real label set {CONFIDENTIAL,
// PRIVATE} to the superuser.
@Test public void testLabelsTableOpsWithDifferentUsers() throws Throwable { PrivilegedExceptionAction action=new PrivilegedExceptionAction(){ public VisibilityLabelsResponse run() throws Exception { try (Connection conn=ConnectionFactory.createConnection(conf)){ return VisibilityClient.addLabels(conn,new String[]{"l1","l2"}); } catch ( Throwable e) { } return null; } } ; VisibilityLabelsResponse response=NORMAL_USER1.runAs(action); assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException",response.getResult(0).getException().getName()); assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException",response.getResult(1).getException().getName()); action=new PrivilegedExceptionAction(){ public VisibilityLabelsResponse run() throws Exception { try (Connection conn=ConnectionFactory.createConnection(conf)){ return VisibilityClient.setAuths(conn,new String[]{CONFIDENTIAL,PRIVATE},"user1"); } catch ( Throwable e) { } return null; } } ; response=NORMAL_USER1.runAs(action); assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException",response.getResult(0).getException().getName()); assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException",response.getResult(1).getException().getName()); action=new PrivilegedExceptionAction(){ public VisibilityLabelsResponse run() throws Exception { try (Connection conn=ConnectionFactory.createConnection(conf)){ return VisibilityClient.setAuths(conn,new String[]{CONFIDENTIAL,PRIVATE},"user1"); } catch ( Throwable e) { } return null; } } ; response=SUPERUSER.runAs(action); assertTrue(response.getResult(0).getException().getValue().isEmpty()); assertTrue(response.getResult(1).getException().getValue().isEmpty()); action=new PrivilegedExceptionAction(){ public VisibilityLabelsResponse run() throws Exception { try (Connection conn=ConnectionFactory.createConnection(conf)){ return VisibilityClient.clearAuths(conn,new String[]{CONFIDENTIAL,PRIVATE},"user1"); } catch ( Throwable e) { } return null; } } ; 
response=NORMAL_USER1.runAs(action); assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException",response.getResult(0).getException().getName()); assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException",response.getResult(1).getException().getName()); response=VisibilityClient.clearAuths(TEST_UTIL.getConnection(),new String[]{CONFIDENTIAL,PRIVATE},"user1"); assertTrue(response.getResult(0).getException().getValue().isEmpty()); assertTrue(response.getResult(1).getException().getValue().isEmpty()); VisibilityClient.setAuths(TEST_UTIL.getConnection(),new String[]{CONFIDENTIAL,PRIVATE},"user3"); PrivilegedExceptionAction action1=new PrivilegedExceptionAction(){ public GetAuthsResponse run() throws Exception { try (Connection conn=ConnectionFactory.createConnection(conf)){ return VisibilityClient.getAuths(conn,"user3"); } catch ( Throwable e) { } return null; } } ; GetAuthsResponse authsResponse=NORMAL_USER1.runAs(action1); assertNull(authsResponse); authsResponse=SUPERUSER.runAs(action1); List authsList=new ArrayList(); for ( ByteString authBS : authsResponse.getAuthList()) { authsList.add(Bytes.toString(authBS.toByteArray())); } assertEquals(2,authsList.size()); assertTrue(authsList.contains(CONFIDENTIAL)); assertTrue(authsList.contains(PRIVATE)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test
public void testScanForSuperUserWithFewerLabelAuths() throws Throwable {
  // "admin" only has SECRET explicitly set, but the superuser may scan with
  // additional authorizations, so both labeled rows come back.
  String[] auths = { SECRET };
  String user = "admin";
  try (Connection conn = ConnectionFactory.createConnection(conf)) {
    VisibilityClient.setAuths(conn, auths, user);
  }
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  final Table table = createTableAndWriteDataWithLabels(tableName,
      SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" + PRIVATE);
  PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      Scan s = new Scan();
      s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table t = connection.getTable(table.getName())) {
        ResultScanner scanner = t.getScanner(s);
        Result[] result = scanner.next(5);
        assertEquals(2, result.length);
      }
      return null;
    }
  };
  SUPERUSER.runAs(scanAction);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test
public void testGetForSuperUserWithFewerLabelAuths() throws Throwable {
  // Same as the scan variant, but via Get: the superuser can read row1
  // with auths beyond those explicitly set for "admin".
  String[] auths = { SECRET };
  String user = "admin";
  VisibilityClient.setAuths(TEST_UTIL.getConnection(), auths, user);
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  final Table table = createTableAndWriteDataWithLabels(tableName,
      SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" + PRIVATE);
  PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      Get g = new Get(row1);
      g.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table t = connection.getTable(table.getName())) {
        Result result = t.get(g);
        assertFalse(result.isEmpty());
      }
      return null;
    }
  };
  SUPERUSER.runAs(scanAction);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test
public void testVisibilityLabelsForUserWithNoAuths() throws Throwable {
  // SECRET is cleared for "admin" and granted to "user1", but the reader
  // here is NORMAL_USER2, who holds no auths at all: despite having READ
  // permission on the table, the SECRET-labeled row must be invisible.
  String user = "admin";
  String[] auths = { SECRET };
  try (Connection conn = ConnectionFactory.createConnection(conf)) {
    VisibilityClient.clearAuths(conn, auths, user);
    VisibilityClient.setAuths(conn, auths, "user1");
  }
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  final Table table = createTableAndWriteDataWithLabels(tableName, SECRET);
  SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER1.getShortName(), tableName, null, null,
      Permission.Action.READ);
  SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName, null, null,
      Permission.Action.READ);
  PrivilegedExceptionAction<Void> getAction = new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      Get g = new Get(row1);
      g.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table t = connection.getTable(table.getName())) {
        Result result = t.get(g);
        assertTrue(result.isEmpty());
      }
      return null;
    }
  };
  NORMAL_USER2.runAs(getAction);
}

Class: org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDefaultVisLabelService

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testAddLabels() throws Throwable {
  // Adds a batch of labels where one (SECRET) already exists: only that
  // entry reports a failure (LabelAlreadyExistsException wrapped in a
  // DoNotRetryIOException); the rest — including the syntactically odd
  // "invalid~" — are accepted.
  PrivilegedExceptionAction<VisibilityLabelsResponse> action =
      new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
        public VisibilityLabelsResponse run() throws Exception {
          String[] labels = { "L1", SECRET, "L2", "invalid~", "L3" };
          VisibilityLabelsResponse response = null;
          try (Connection conn = ConnectionFactory.createConnection(conf)) {
            response = VisibilityClient.addLabels(conn, labels);
          } catch (Throwable e) {
            fail("Should not have thrown exception");
          }
          assertEquals(5, response.getResultList().size());
          assertTrue(response.getResult(0).getException().getValue().isEmpty());
          assertEquals("org.apache.hadoop.hbase.DoNotRetryIOException",
              response.getResult(1).getException().getName());
          assertTrue(Bytes.toString(response.getResult(1).getException().getValue().toByteArray())
              .contains("org.apache.hadoop.hbase.security.visibility.LabelAlreadyExistsException: "
                  + "Label 'secret' already exists"));
          assertTrue(response.getResult(2).getException().getValue().isEmpty());
          assertTrue(response.getResult(3).getException().getValue().isEmpty());
          assertTrue(response.getResult(4).getException().getValue().isEmpty());
          return null;
        }
      };
  SUPERUSER.runAs(action);
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testListLabelsWithRegEx() throws Throwable {
  // listLabels with the regex ".*secret" must match exactly the two labels
  // ending in "secret" (SECRET and TOPSECRET) and nothing else.
  PrivilegedExceptionAction<ListLabelsResponse> action =
      new PrivilegedExceptionAction<ListLabelsResponse>() {
        public ListLabelsResponse run() throws Exception {
          ListLabelsResponse response = null;
          try (Connection conn = ConnectionFactory.createConnection(conf)) {
            response = VisibilityClient.listLabels(conn, ".*secret");
          } catch (Throwable e) {
            fail("Should not have thrown exception");
          }
          List<ByteString> labels = response.getLabelList();
          assertEquals(2, labels.size());
          assertTrue(labels.contains(ByteString.copyFrom(SECRET.getBytes())));
          assertTrue(labels.contains(ByteString.copyFrom(TOPSECRET.getBytes())));
          return null;
        }
      };
  SUPERUSER.runAs(action);
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testListLabels() throws Throwable {
  // listLabels with no filter returns every user-defined label (12 here)
  // but must never expose the internal system label.
  PrivilegedExceptionAction<ListLabelsResponse> action =
      new PrivilegedExceptionAction<ListLabelsResponse>() {
        public ListLabelsResponse run() throws Exception {
          ListLabelsResponse response = null;
          try (Connection conn = ConnectionFactory.createConnection(conf)) {
            response = VisibilityClient.listLabels(conn, null);
          } catch (Throwable e) {
            fail("Should not have thrown exception");
          }
          List<ByteString> labels = response.getLabelList();
          assertEquals(12, labels.size());
          assertTrue(labels.contains(ByteString.copyFrom(SECRET.getBytes())));
          assertTrue(labels.contains(ByteString.copyFrom(TOPSECRET.getBytes())));
          assertTrue(labels.contains(ByteString.copyFrom(CONFIDENTIAL.getBytes())));
          assertTrue(labels.contains(ByteString.copyFrom("ABC".getBytes())));
          assertTrue(labels.contains(ByteString.copyFrom("XYZ".getBytes())));
          // The system label is internal and must be filtered out.
          assertFalse(labels.contains(ByteString.copyFrom(SYSTEM_LABEL.getBytes())));
          return null;
        }
      };
  SUPERUSER.runAs(action);
}

Class: org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDeletes

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testDeleteFamilySpecificTimeStampWithMulipleVersions() throws Exception {
  // A family delete at ts=126 carrying a visibility expression removes only
  // the matching version(s) at or below that timestamp: row1 is expected to
  // retain its versions at ts 127, 125 and 123, and row2 is untouched.
  setAuths();
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = doPuts(tableName)) {
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(
              "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")"));
          d.addFamily(fam, 126L);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(6);
    assertEquals(2, next.length);
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    // JUnit convention: expected value first (original had the args reversed).
    assertEquals(127L, current.getTimestamp());
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(125L, current.getTimestamp());
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(123L, current.getTimestamp());
    cellScanner = next[1].cellScanner();
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row2, 0, row2.length));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// A column delete at ts=125 whose visibility expression does not match the
// labels on the cell at that timestamp must remove nothing: all five row1
// versions (ts 127 down to 123) and row2 remain visible afterwards.
// NOTE(review): the assertEquals calls pass the actual value first
// (assertEquals(current.getTimestamp(), 127l)) — harmless but contrary to
// JUnit's expected-first convention; failure messages will be misleading.
@Test public void testDeleteColumnWithSpecificTimeStampUsingMultipleVersionsUnMatchingVisExpression() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&"+ CONFIDENTIAL+ ")|("+ SECRET+ "&"+ TOPSECRET+ ")")); d.addColumn(fam,qual,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); 
current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Issues three timestamp-less DeleteColumns for row1/fam:qual under three
// different visibility expressions (CONFIDENTIAL, SECRET, and the compound
// (PRIVATE&CONFIDENTIAL)|(SECRET&TOPSECRET)). Visibility-scoped deletes only
// mask versions whose labels match, so the scan still returns both rows —
// at least one row1 cell remains visible (the assertions only check the
// first cell of each row, not which versions survived).
@Test public void testVisibilityLabelsWithDeleteColumnsWithNoMatchVisExpWithMultipleVersionsNoTimestamp() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumns(fam,qual); table.delete(d); d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumns(fam,qual); table.delete(d); d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&"+ CONFIDENTIAL+ ")|("+ SECRET+ "&"+ TOPSECRET+ ")")); d.addColumns(fam,qual); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test
public void testDeleteFamilyWithoutCellVisibilityWithMulipleVersions() throws Exception {
  // The puts here carry no visibility labels, so a plain family delete
  // (no CellVisibility on the Delete) wipes all of row1; only row2 remains.
  setAuths();
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = doPutsWithoutVisibility(tableName)) {
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.addFamily(fam);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertEquals(1, next.length);
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row2, 0, row2.length));
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test
public void testDeleteColumnsWithAndWithoutVisibilityLabels() throws Exception {
  // Deletes a CONFIDENTIAL-labeled cell twice: first with a matching
  // CellVisibility on the Delete, then without any expression. A full scan
  // must see nothing after each delete.
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
  HColumnDescriptor colDesc = new HColumnDescriptor(fam);
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(colDesc);
  hBaseAdmin.createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put put = new Put(row1);
    put.addColumn(fam, qual, value);
    put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    table.put(put);
    // Delete with a matching visibility expression.
    Delete d = new Delete(row1);
    d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    d.addColumns(fam, qual, HConstants.LATEST_TIMESTAMP);
    table.delete(d);
    PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Scan s = new Scan();
          ResultScanner scanner = table.getScanner(s);
          Result[] next = scanner.next(3);
          // Expected value first (original had assertEquals(next.length, 0)).
          assertEquals(0, next.length);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(scanAction);
    // Delete again, this time without any visibility expression.
    d = new Delete(row1);
    d.addColumns(fam, qual, HConstants.LATEST_TIMESTAMP);
    table.delete(d);
    scanAction = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Scan s = new Scan();
          ResultScanner scanner = table.getScanner(s);
          Result[] next = scanner.next(3);
          assertEquals(0, next.length);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(scanAction);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Two successive single-version deletes (addColumn) on row1. First, ts=125 is deleted
 * under "(PRIVATE&amp;CONFIDENTIAL)|(TOPSECRET&amp;SECRET)": the scan still shows row1
 * versions 127,126,125,124,123 (the targeted cell's visibility did not match) plus row2.
 * Then ts=127 is deleted under the operand-reordered expression
 * "(CONFIDENTIAL&amp;PRIVATE)|(TOPSECRET&amp;SECRET)": row1 retains 126,125,124,123 and
 * row2's cell at ts=127 survives.
 */
@Test public void testDeleteColumnSpecificTimeStampWithMulipleVersionsDoneTwice2() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&"+ CONFIDENTIAL+ ")|("+ TOPSECRET+ "&"+ SECRET+ ")")); d.addColumn(fam,qual,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); 
// First verification pass complete (all five row1 versions present); now delete ts=127.
assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&"+ PRIVATE+ ")|("+ TOPSECRET+ "&"+ SECRET+ ")")); d.addColumn(fam,qual,127l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); cellScanner=next[0].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); 
// row2's cell at ts=127 is untouched by the row1 deletes.
current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); assertEquals(current.getTimestamp(),127l); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * After a flush, batches three family deletes on row1 — CONFIDENTIAL, SECRET, and
 * "(PRIVATE&amp;CONFIDENTIAL)|(SECRET&amp;TOPSECRET)" — as SUPERUSER. The subsequent
 * scan with all four auths still returns both rows, i.e. at least one version of row1
 * survives the three expressions.
 */
@Test public void testVisibilityLabelsWithDeleteFamilyWithNoMatchingVisExpWithMultipleVersionsNoTimestamp() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { Delete d1=new Delete(row1); d1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d1.addFamily(fam); Delete d2=new Delete(row1); d2.setCellVisibility(new CellVisibility(SECRET)); d2.addFamily(fam); Delete d3=new Delete(row1); d3.setCellVisibility(new CellVisibility("(" + PRIVATE + "&"+ CONFIDENTIAL+ ")|("+ SECRET+ "&"+ TOPSECRET+ ")")); d3.addFamily(fam); try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ table.delete(createList(d1,d2,d3)); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); scanner.close(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Mirror of testDeleteColumnsWithAndWithoutVisibilityLabels: first an UNlabelled delete,
 * which must NOT mask the CONFIDENTIAL put (scan still sees one row), then a delete with
 * the matching CONFIDENTIAL expression, after which the scan sees nothing.
 * <p>
 * Fixes vs. original: {@code assertEquals} arguments were reversed (expected first, per
 * JUnit convention and sibling tests); raw {@code PrivilegedExceptionAction}
 * parameterized.
 */
@Test public void testDeleteColumnsWithoutAndWithVisibilityLabels() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
  HColumnDescriptor colDesc = new HColumnDescriptor(fam);
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(colDesc);
  hBaseAdmin.createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put put = new Put(row1);
    put.addColumn(fam, qual, value);
    put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    table.put(put);
    // Unlabelled delete: must not mask the labelled put.
    Delete d = new Delete(row1);
    d.addColumns(fam, qual, HConstants.LATEST_TIMESTAMP);
    table.delete(d);
    PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Scan s = new Scan();
          ResultScanner scanner = table.getScanner(s);
          Result[] next = scanner.next(3);
          // The CONFIDENTIAL put is still visible.
          assertEquals(1, next.length);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(scanAction);
    // Now delete with the matching visibility expression.
    d = new Delete(row1);
    d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    d.addColumns(fam, qual, HConstants.LATEST_TIMESTAMP);
    table.delete(d);
    scanAction = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Scan s = new Scan();
          ResultScanner scanner = table.getScanner(s);
          Result[] next = scanner.next(3);
          assertEquals(0, next.length);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(scanAction);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Writes row1 with "SECRET&amp;TOPSECRET" and row2 with "SECRET", then deletes row1's
 * columns under the matching "TOPSECRET&amp;SECRET" expression as SUPERUSER. After a
 * flush, a scan authorized for SECRET/PRIVATE/CONFIDENTIAL returns only row2.
 */
@Test public void testVisibilityLabelsWithDeleteColumns() throws Throwable { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=createTableAndWriteDataWithLabels(tableName,SECRET + "&" + TOPSECRET,SECRET)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(TOPSECRET + "&" + SECRET)); d.addColumns(fam,qual); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 1); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After a flush, issues three addColumns deletes on row1 (no timestamp) under
 * CONFIDENTIAL, "SECRET&amp;TOPSECRET", and "(PRIVATE&amp;CONFIDENTIAL)|(SECRET&amp;TOPSECRET)".
 * Together these mask every version of row1, so the scan with all four auths returns
 * exactly one row — row2.
 */
@Test public void testVisibilityLabelsWithDeleteColumnsWithMultipleVersionsNoTimestamp() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d1=new Delete(row1); d1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d1.addColumns(fam,qual); table.delete(d1); Delete d2=new Delete(row1); d2.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d2.addColumns(fam,qual); table.delete(d2); Delete d3=new Delete(row1); d3.setCellVisibility(new CellVisibility("(" + PRIVATE + "&"+ CONFIDENTIAL+ ")|("+ SECRET+ "&"+ TOPSECRET+ ")")); d3.addColumns(fam,qual); table.delete(d3); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertEquals(1,next.length); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * addColumn with no explicit timestamp deletes only the newest version whose visibility
 * matches "SECRET&amp;TOPSECRET". After flushes, the scan with all four auths shows row1
 * retaining versions 127, 126, 124, 123 (the matching cell at ts=125 is gone) plus row2.
 */
@Test public void testDeleteColumnWithLatestTimeStampUsingMultipleVersions() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam,qual); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); 
assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Deletes the single version of row1 at ts=127 under
 * "(CONFIDENTIAL&amp;PRIVATE)|(TOPSECRET&amp;SECRET)": the scan then shows row1 versions
 * 126, 125, 124, 123 plus row2. Deleting ts=127 again under "SECRET&amp;TOPSECRET" is a
 * no-op for row1 (127 is already masked); the second scan re-verifies the same versions
 * and that row2's cell at ts=127 is untouched.
 */
@Test public void testDeleteColumnSpecificTimeStampWithMulipleVersionsDoneTwice1() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&"+ PRIVATE+ ")"+ "|("+ TOPSECRET+ "&"+ SECRET+ ")")); d.addColumn(fam,qual,127l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); 
assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam,qual,127l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); cellScanner=next[0].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); assertEquals(current.getTimestamp(),127l); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Batches two addColumns deletes with different qualifiers and visibility expressions
 * and verifies one cell survives. Both puts write {@code qual1}; d1 targets {@code qual}
 * (matching nothing — presumably intentional, this is the "diff cols" scenario) while
 * d2 (CONFIDENTIAL, up to ts=125) removes the CONFIDENTIAL cell, leaving the SECRET
 * cell at ts=126, so exactly one row remains.
 * <p>
 * Fix vs. original: {@code assertEquals} arguments were reversed (expected first, per
 * JUnit convention); raw {@code PrivilegedExceptionAction} parameterized.
 */
@Test public void testDeleteColumnsWithDiffColsAndTags() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
  HColumnDescriptor colDesc = new HColumnDescriptor(fam);
  colDesc.setMaxVersions(5);
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(colDesc);
  hBaseAdmin.createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(fam, qual1, 125l, value);
    put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    table.put(put);
    put = new Put(Bytes.toBytes("row1"));
    put.addColumn(fam, qual1, 126l, value);
    put.setCellVisibility(new CellVisibility(SECRET));
    table.put(put);
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        Delete d1 = new Delete(row1);
        d1.setCellVisibility(new CellVisibility(SECRET));
        d1.addColumns(fam, qual, 126l);
        Delete d2 = new Delete(row1);
        d2.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        d2.addColumns(fam, qual1, 125l);
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          table.delete(createList(d1, d2));
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertEquals(1, next.length);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies delete/put behaviour when neither the puts nor the delete carry any cell
 * visibility expression: a plain row delete removes all visible cells of row1, and a
 * later put makes the row visible again.
 * <p>
 * Fix vs. original: the {@link Table} was never closed — now wrapped in
 * try-with-resources like every sibling test.
 */
@Test public void testDeleteWithNoVisibilitiesForPutsAndDeletes() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
  HColumnDescriptor colDesc = new HColumnDescriptor(fam);
  colDesc.setMaxVersions(5);
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(colDesc);
  hBaseAdmin.createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put p = new Put(Bytes.toBytes("row1"));
    p.addColumn(fam, qual, value);
    table.put(p);
    p = new Put(Bytes.toBytes("row1"));
    p.addColumn(fam, qual1, value);
    table.put(p);
    p = new Put(Bytes.toBytes("row2"));
    p.addColumn(fam, qual, value);
    table.put(p);
    p = new Put(Bytes.toBytes("row2"));
    p.addColumn(fam, qual1, value);
    table.put(p);
    // Full-row delete with no visibility expression.
    Delete d = new Delete(Bytes.toBytes("row1"));
    table.delete(d);
    Get g = new Get(Bytes.toBytes("row1"));
    g.setMaxVersions();
    g.setAuthorizations(new Authorizations(SECRET, PRIVATE));
    Result result = table.get(g);
    assertEquals(0, result.rawCells().length);
    // Re-put after the delete: the new cell must be visible again.
    p = new Put(Bytes.toBytes("row1"));
    p.addColumn(fam, qual, value);
    table.put(p);
    result = table.get(g);
    assertEquals(1, result.rawCells().length);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Variant of testDeleteColumnsWithDiffColsAndTags where d2 targets timestamps up to 126.
 * Both puts write {@code qual1}; d1 targets {@code qual} (matching nothing — presumably
 * intentional) and d2 (CONFIDENTIAL, up to ts=126) removes only the CONFIDENTIAL cell at
 * ts=125, so the SECRET cell at ts=126 survives and the scan returns one row.
 * <p>
 * Fix vs. original: {@code assertEquals} arguments were reversed (expected first, per
 * JUnit convention); raw {@code PrivilegedExceptionAction} parameterized.
 */
@Test public void testDeleteColumnsWithDiffColsAndTags1() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
  HColumnDescriptor colDesc = new HColumnDescriptor(fam);
  colDesc.setMaxVersions(5);
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(colDesc);
  hBaseAdmin.createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(fam, qual1, 125l, value);
    put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    table.put(put);
    put = new Put(Bytes.toBytes("row1"));
    put.addColumn(fam, qual1, 126l, value);
    put.setCellVisibility(new CellVisibility(SECRET));
    table.put(put);
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        Delete d1 = new Delete(row1);
        d1.setCellVisibility(new CellVisibility(SECRET));
        d1.addColumns(fam, qual, 126l);
        Delete d2 = new Delete(row1);
        d2.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        d2.addColumns(fam, qual1, 126l);
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          table.delete(createList(d1, d2));
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertEquals(1, next.length);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Two successive family deletes on row1 with explicit timestamps. First, a family delete
 * up to ts=125 under "(PRIVATE&amp;CONFIDENTIAL)|(TOPSECRET&amp;SECRET)": the scan shows
 * row1 versions 127, 126, 125, 123 (124 masked) plus row2. Second, a family delete up to
 * ts=127 under the operand-reordered expression: only versions 125 and 123 of row1
 * survive, and row2's cell at ts=127 is untouched.
 */
@Test public void testDeleteFamilySpecificTimeStampWithMulipleVersionsDoneTwice() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&"+ CONFIDENTIAL+ ")|("+ TOPSECRET+ "&"+ SECRET+ ")")); d.addFamily(fam,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); 
assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&"+ PRIVATE+ ")|("+ TOPSECRET+ "&"+ SECRET+ ")")); d.addFamily(fam,127l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); cellScanner=next[0].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); assertEquals(current.getTimestamp(),127l); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Batches an UNlabelled family delete (d1) and a labelled addColumns delete under
 * "SECRET&amp;TOPSECRET" (d2) on row1, after a flush. The scan with all four auths then
 * shows row1 retaining versions 127, 126, 124, 123 (ts=125 masked) plus row2.
 */
@Test public void testDeleteFamilyAndDeleteColumnsWithAndWithoutVisibilityExp() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { Delete d1=new Delete(row1); d1.addFamily(fam); Delete d2=new Delete(row1); d2.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d2.addColumns(fam,qual); try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ table.delete(createList(d1,d2)); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); 
assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); scanner.close(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Two successive single-version deletes (addColumn) on row1. First, ts=125 under
 * "SECRET&amp;TOPSECRET": the scan shows row1 versions 127, 126, 124, 123 plus row2.
 * Then ts=127 under "(CONFIDENTIAL&amp;PRIVATE)|(TOPSECRET&amp;SECRET)": row1 retains
 * 126, 124, 123 and row2's cell at ts=127 is untouched.
 */
@Test public void testDeleteColumnSpecificTimeStampWithMulipleVersionsDoneTwice() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam,qual,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); 
assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&"+ PRIVATE+ ")|("+ TOPSECRET+ "&"+ SECRET+ ")")); d.addColumn(fam,qual,127l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); cellScanner=next[0].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); assertEquals(current.getTimestamp(),127l); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testVisibilityLabelsWithDeleteColumnsWithMultipleVersions() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&"+ CONFIDENTIAL+ ")|("+ SECRET+ "&"+ TOPSECRET+ ")")); d.addColumns(fam,qual,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * A family delete only masks cells whose visibility matches it: after writing the same
 * cell under CONFIDENTIAL and SECRET and deleting the family with CONFIDENTIAL, a SECRET
 * scan still sees the row. A later CONFIDENTIAL put re-appears (1 row under CONFIDENTIAL
 * auth) until a SECRET family delete removes the remaining SECRET view (0 rows).
 */
@Test public void testVisibilityLabelsWithDeleteFamilyWithPutsReAppearing() throws Exception { final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); Admin hBaseAdmin=TEST_UTIL.getHBaseAdmin(); HColumnDescriptor colDesc=new HColumnDescriptor(fam); colDesc.setMaxVersions(5); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(colDesc); hBaseAdmin.createTable(desc); try (Table table=TEST_UTIL.getConnection().getTable(tableName)){ Put put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,value); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addFamily(fam); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertEquals(next.length,1); put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,value1); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addFamily(fam); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); s=new Scan(); 
s.setMaxVersions(5); s.setAuthorizations(new Authorizations(CONFIDENTIAL)); scanner=table.getScanner(s); next=scanner.next(3); assertEquals(next.length,1); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET)); scanner=table.getScanner(s); Result[] next1=scanner.next(3); assertEquals(next1.length,0); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testVisibilityLabelsWithDeleteFamilyWithMultipleVersionsNoTimestamp() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d1=new Delete(row1); d1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d1.addFamily(fam); table.delete(d1); Delete d2=new Delete(row1); d2.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d2.addFamily(fam); table.delete(d2); Delete d3=new Delete(row1); d3.setCellVisibility(new CellVisibility("(" + PRIVATE + "&"+ CONFIDENTIAL+ ")|("+ SECRET+ "&"+ TOPSECRET+ ")")); d3.addFamily(fam); table.delete(d3); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertEquals(1,next.length); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When the puts carry NO cell visibility (doPutsWithoutVisibility), a family delete tagged
 * with SECRET&TOPSECRET must not mask any of them: the full-auth scan still sees all five
 * row1 versions (127..123) plus row2 after the delete and a flush.
 */
@Test public void testDeleteFamilyLatestTimeStampWithMulipleVersionsWithoutCellVisibilityInPuts() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPutsWithoutVisibility(tableName)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addFamily(fam); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); 
assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test public void testVisibilityLabelsWithDeleteFamilyVersion() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); long[] ts=new long[]{123l,125l}; try (Table table=createTableAndWriteDataWithLabels(tableName,ts,CONFIDENTIAL + "|" + TOPSECRET,SECRET)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); d.addFamilyVersion(fam,123l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 1); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testDeleteFamiliesWithoutAndWithVisibilityLabels() throws Exception { final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); Admin hBaseAdmin=TEST_UTIL.getHBaseAdmin(); HColumnDescriptor colDesc=new HColumnDescriptor(fam); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(colDesc); hBaseAdmin.createTable(desc); try (Table table=TEST_UTIL.getConnection().getTable(tableName)){ Put put=new Put(row1); put.addColumn(fam,qual,value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); Delete d=new Delete(row1); d.addFamily(fam); table.delete(d); PrivilegedExceptionAction scanAction=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Scan s=new Scan(); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertEquals(next.length,1); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(scanAction); d=new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addFamily(fam); table.delete(d); scanAction=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Scan s=new Scan(); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertEquals(next.length,0); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(scanAction); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testScanAfterCompaction() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&"+ CONFIDENTIAL+ ")|("+ SECRET+ "&"+ TOPSECRET+ ")")); d.addFamily(fam,126l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Put put=new Put(Bytes.toBytes("row3")); put.addColumn(fam,qual,127l,value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL + "&" + PRIVATE)); table.put(put); TEST_UTIL.getHBaseAdmin().flush(tableName); TEST_UTIL.getHBaseAdmin().compact(tableName); Thread.sleep(5000); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 3); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testVisibilityLabelsWithDeleteColumnWithSpecificVersionWithPutsReAppearing() throws Exception { final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); Admin hBaseAdmin=TEST_UTIL.getHBaseAdmin(); HColumnDescriptor colDesc=new HColumnDescriptor(fam); colDesc.setMaxVersions(5); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(colDesc); hBaseAdmin.createTable(desc); try (Table table=TEST_UTIL.getConnection().getTable(tableName)){ Put put1=new Put(Bytes.toBytes("row1")); put1.addColumn(fam,qual,123l,value); put1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); Put put2=new Put(Bytes.toBytes("row1")); put2.addColumn(fam,qual,123l,value1); put2.setCellVisibility(new CellVisibility(SECRET)); table.put(createList(put1,put2)); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(CONFIDENTIAL,SECRET)); ResultScanner scanner=table.getScanner(s); assertEquals(scanner.next(3).length,1); scanner.close(); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumn(fam,qual,123l); table.delete(d); } try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumn(fam,qual,123l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(CONFIDENTIAL)); scanner=table.getScanner(s); assertEquals(scanner.next(3).length,0); scanner.close(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Specific deletes first (addColumn ts=126 and addFamilyVersion ts=125, each visibility-tagged),
 * leaving row1 with versions 127/124/123; then a family delete under CONFIDENTIAL removes the
 * CONFIDENTIAL-tagged 123 cell, leaving 127/124. Row2 stays visible throughout.
 */
@Test(timeout=180000) public void testSpecificDeletesFollowedByDeleteFamily() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&"+ PRIVATE+ ")|("+ TOPSECRET+ "&"+ SECRET+ ")")); d.addColumn(fam,qual,126l); table.delete(d); d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addFamilyVersion(fam,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(5); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table 
table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addFamily(fam); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(5); assertTrue(next.length == 2); cellScanner=next[0].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * addColumn(fam, qual) with no explicit timestamp deletes only the latest matching version
 * (ts=125 under SECRET&TOPSECRET) and the effect must survive flushes and a major compaction:
 * a full-auth scan afterwards sees three rows, with row1 exposing 127/126/124/123.
 * NOTE(review): the fixed Thread.sleep(5000) after majorCompact() assumes the compaction
 * finishes within 5s — potentially flaky.
 */
@Test public void testDeleteColumnWithLatestTimeStampUsingMultipleVersionsAfterCompaction() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam,qual); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Put put=new Put(Bytes.toBytes("row3")); put.addColumn(fam,qual,127l,value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL + "&" + PRIVATE)); table.put(put); TEST_UTIL.getHBaseAdmin().flush(tableName); TEST_UTIL.getHBaseAdmin().majorCompact(tableName); Thread.sleep(5000); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 3); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); 
current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test public void testVisibilityLabelsWithDeleteColumnExactVersion() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); long[] ts=new long[]{123l,125l}; try (Table table=createTableAndWriteDataWithLabels(tableName,ts,CONFIDENTIAL + "|" + TOPSECRET,SECRET)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); d.addColumn(fam,qual,123l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 1); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A latest-version column delete tagged SECRET finds no matching latest cell (the newest,
 * ts=128, is TOPSECRET), so nothing current is masked; the scan still sees 128/127/126/125/124
 * for row1. A subsequent SECRET put at ts=129 then appears as the newest visible version,
 * confirming the earlier delete did not linger against future SECRET puts at higher timestamps.
 */
@Test(timeout=180000) public void testDeleteColumnWithLatestTimeStampWhenNoVersionMatches() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); Put put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,128l,value); put.setCellVisibility(new CellVisibility(TOPSECRET)); table.put(put); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumn(fam,qual); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),128l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); 
assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,129l,value); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); TEST_UTIL.getHBaseAdmin().flush(tableName); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); cellScanner=next[0].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),129l); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testDeleteWithFamilyDeletesOfSameTsButDifferentVisibilities() throws Exception { final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); Admin hBaseAdmin=TEST_UTIL.getHBaseAdmin(); HColumnDescriptor colDesc=new HColumnDescriptor(fam); colDesc.setMaxVersions(5); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(colDesc); hBaseAdmin.createTable(desc); Table table=TEST_UTIL.getConnection().getTable(tableName); long t1=1234L; CellVisibility cellVisibility1=new CellVisibility(SECRET); CellVisibility cellVisibility2=new CellVisibility(PRIVATE); Put p=new Put(row1); p.addColumn(fam,qual,t1,value); p.setCellVisibility(cellVisibility1); table.put(p); p=new Put(row1); p.addColumn(fam,qual1,t1,value); p.setCellVisibility(cellVisibility2); table.put(p); Delete d=new Delete(row1); d.addFamily(fam,t1); d.setCellVisibility(cellVisibility2); table.delete(d); d=new Delete(row1); d.addFamily(fam,t1); d.setCellVisibility(cellVisibility1); table.delete(d); Get g=new Get(row1); g.setMaxVersions(); g.setAuthorizations(new Authorizations(SECRET,PRIVATE)); Result result=table.get(g); assertEquals(0,result.rawCells().length); p=new Put(row2); p.addColumn(fam,qual,t1,value); p.setCellVisibility(cellVisibility1); table.put(p); p=new Put(row2); p.addColumn(fam,qual1,t1,value); p.setCellVisibility(cellVisibility2); table.put(p); d=new Delete(row2); d.addFamilyVersion(fam,t1); d.setCellVisibility(cellVisibility2); table.delete(d); d=new Delete(row2); d.addFamilyVersion(fam,t1); d.setCellVisibility(cellVisibility1); table.delete(d); g=new Get(row2); g.setMaxVersions(); g.setAuthorizations(new Authorizations(SECRET,PRIVATE)); result=table.get(g); assertEquals(0,result.rawCells().length); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Variant of testSpecificDeletesFollowedByDeleteFamily where the superuser is first granted a
 * reduced auth set (CONFIDENTIAL, PRIVATE, SECRET — no TOPSECRET). Specific deletes (latest
 * column version under (CONFIDENTIAL&PRIVATE)|(TOPSECRET&SECRET) and family-version ts=125
 * under SECRET&TOPSECRET) leave row1 with versions 126/124/123; a subsequent family delete
 * under CONFIDENTIAL removes the ts=123 cell, leaving 126/124. Row2 stays visible throughout.
 *
 * Fixes over the original: the auth-setup action no longer swallows Throwable in an empty
 * catch (it rethrows as IOException, matching every other privileged action in this class),
 * and the unused VisibilityLabelsResponse local was removed.
 */
@Test(timeout = 180000)
public void testSpecificDeletesFollowedByDeleteFamily1() throws Exception {
  // Grant the superuser CONFIDENTIAL/PRIVATE/SECRET auths for this test.
  PrivilegedExceptionAction action = new PrivilegedExceptionAction() {
    @Override
    public VisibilityLabelsResponse run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE, SECRET },
            SUPERUSER.getShortName());
      } catch (Throwable e) {
        // Propagate instead of silently swallowing: a failed auth setup would otherwise
        // surface only as confusing assertion failures further down.
        throw new IOException(e);
      }
    }
  };
  SUPERUSER.runAs(action);
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = doPuts(tableName)) {
    PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          // Latest-version column delete under a compound visibility expression.
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(
              "(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")"));
          d.addColumn(fam, qual);
          table.delete(d);
          // Family-version delete targeting the ts=125 cell.
          d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET));
          d.addFamilyVersion(fam, 125l);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(5);
    assertTrue(next.length == 2);
    // row1 should retain ts 126, 124, 123 after the specific deletes.
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 126l);
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 124l);
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 123l);
    // Follow up with a CONFIDENTIAL family delete; it should remove the ts=123 cell.
    actiona = new PrivilegedExceptionAction() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
          d.addFamily(fam);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
    scanner = table.getScanner(s);
    next = scanner.next(5);
    assertTrue(next.length == 2);
    cellScanner = next[0].cellScanner();
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 126l);
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 124l);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
@Test public void testVisibilityLabelsWithDeleteFamily() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=createTableAndWriteDataWithLabels(tableName,SECRET,CONFIDENTIAL + "|" + TOPSECRET)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row2); d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); d.addFamily(fam); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 1); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Issues two Delete.addFamilyVersion() deletes on row1, each scoped to a different
 * visibility expression (ts=123 under CONFIDENTIAL, ts=125 under SECRET&TOPSECRET),
 * and verifies that only those exact versions are removed: a fully-authorized scan
 * still sees versions 127, 126 and 124 of row1, plus row2.
 */
@Test
public void testMultipleDeleteFamilyVersionWithDiffLabels() throws Exception {
  PrivilegedExceptionAction action = new PrivilegedExceptionAction() {
    @Override
    public VisibilityLabelsResponse run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        return VisibilityClient.setAuths(conn,
            new String[] { CONFIDENTIAL, PRIVATE, SECRET }, SUPERUSER.getShortName());
      } catch (Throwable e) {
        // Previously swallowed silently; surface the failure so the test does not
        // continue with the superuser's auths missing.
        throw new IOException(e);
      }
    }
  };
  SUPERUSER.runAs(action);
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = doPuts(tableName)) {
    PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          // Delete exactly version 123, visible only under CONFIDENTIAL ...
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
          d.addFamilyVersion(fam, 123L);
          table.delete(d);
          // ... and exactly version 125, visible only under SECRET&TOPSECRET.
          d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET));
          d.addFamilyVersion(fam, 125L);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(5);
    assertEquals(2, next.length);
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    // assertEquals(expected, actual) order; uppercase L suffix avoids '1'/'l' confusion.
    assertEquals(127L, current.getTimestamp());
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(126L, current.getTimestamp());
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(124L, current.getTimestamp());
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Verifies that deletes WITHOUT a cell-visibility expression never remove any of the
// labeled versions written by doPuts(): after each delete variant — addColumn(latest),
// addColumns, addFamily(ts), addFamily(), addColumns(no ts), addFamilyVersion — and a
// flush, the scan still returns the same 2 rows and scanAll() re-verifies every version.
// NOTE(review): the ResultScanner instances are never closed; presumably acceptable for
// a short-lived test — confirm against the suite's conventions.
@Test public void testDeleteColumnLatestWithNoCellVisibility() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.addColumn(fam,qual,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); scanAll(next); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.addColumns(fam,qual,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); scanAll(next); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.addFamily(fam,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); s=new Scan(); s.setMaxVersions(5); 
s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); scanAll(next); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.addFamily(fam); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); scanAll(next); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.addColumns(fam,qual); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); scanAll(next); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.addFamilyVersion(fam,126l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); 
assertTrue(next.length == 2); scanAll(next); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Verifies the interaction of unlabeled vs. labeled deletes on a labeled cell: a Delete
// WITHOUT a cell-visibility expression does not remove the CONFIDENTIAL cell (superuser
// scan still returns 1 row), while the same delete carrying the matching CONFIDENTIAL
// expression does remove it (scan then returns 0 rows).
@Test public void testDeletesWithoutAndWithVisibilityLabels() throws Exception { final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); Admin hBaseAdmin=TEST_UTIL.getHBaseAdmin(); HColumnDescriptor colDesc=new HColumnDescriptor(fam); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(colDesc); hBaseAdmin.createTable(desc); try (Table table=TEST_UTIL.getConnection().getTable(tableName)){ Put put=new Put(row1); put.addColumn(fam,qual,value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); Delete d=new Delete(row1); d.addColumn(fam,qual); table.delete(d); PrivilegedExceptionAction scanAction=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Scan s=new Scan(); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertEquals(next.length,1); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(scanAction); d=new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumn(fam,qual); table.delete(d); scanAction=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Scan s=new Scan(); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertEquals(next.length,0); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(scanAction); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies a family delete at the latest timestamp scoped to SECRET&TOPSECRET: after the
// delete and a flush, a fully-authorized scan still returns both rows, with row1 keeping
// versions 127 and 126 (the versions matching the delete's expression are gone) and row2
// untouched.
@Test public void testDeleteFamilyLatestTimeStampWithMulipleVersions() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addFamily(fam); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies that a delete whose visibility expression (PRIVATE) is not an exact match for
// the cell's stored expression (CONFIDENTIAL|PRIVATE) removes nothing: after the delete
// and flush, the scan still returns both versions of row1 (ts=124 and ts=123).
@Test public void testVisibilityExpressionWithNotEqualORCondition() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); Admin hBaseAdmin=TEST_UTIL.getHBaseAdmin(); HColumnDescriptor colDesc=new HColumnDescriptor(fam); colDesc.setMaxVersions(5); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(colDesc); hBaseAdmin.createTable(desc); try (Table table=TEST_UTIL.getConnection().getTable(tableName)){ Put put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,123l,value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,124l,value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL + "|" + PRIVATE)); table.put(put); TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.addColumn(fam,qual,124l); d.setCellVisibility(new CellVisibility(PRIVATE)); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 1); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); 
assertEquals(current.getTimestamp(),123l); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Verifies that a visibility-scoped addColumns() delete only hides the matching cell and
// that a later put makes the column reappear under its label: delete under CONFIDENTIAL
// leaves the SECRET cell visible; after re-putting under CONFIDENTIAL and deleting under
// SECRET, a CONFIDENTIAL scan sees 1 row while a SECRET scan sees none.
@Test public void testVisibilityLabelsWithDeleteColumnsWithPutsReAppearing() throws Exception { final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); Admin hBaseAdmin=TEST_UTIL.getHBaseAdmin(); HColumnDescriptor colDesc=new HColumnDescriptor(fam); colDesc.setMaxVersions(5); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(colDesc); hBaseAdmin.createTable(desc); try (Table table=TEST_UTIL.getConnection().getTable(tableName)){ Put put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,value); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumns(fam,qual); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertEquals(next.length,1); put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,value1); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumns(fam,qual); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); s=new 
Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(CONFIDENTIAL)); scanner=table.getScanner(s); next=scanner.next(3); assertEquals(next.length,1); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET)); scanner=table.getScanner(s); Result[] next1=scanner.next(3); assertEquals(next1.length,0); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Verifies that combining a visibility-scoped addColumns() delete (up to ts=126, SECRET)
// with a visibility-scoped addColumn() delete (exactly ts=123, CONFIDENTIAL) removes both
// labeled cells of the row: a scan authorized for both labels returns nothing.
@Test public void testVisibilityCombinations() throws Exception { final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); Admin hBaseAdmin=TEST_UTIL.getHBaseAdmin(); HColumnDescriptor colDesc=new HColumnDescriptor(fam); colDesc.setMaxVersions(5); HTableDescriptor desc=new HTableDescriptor(tableName); desc.addFamily(colDesc); hBaseAdmin.createTable(desc); try (Table table=TEST_UTIL.getConnection().getTable(tableName)){ Put put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,123l,value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); put=new Put(Bytes.toBytes("row1")); put.addColumn(fam,qual,124l,value1); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumns(fam,qual,126l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumn(fam,qual,123l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(CONFIDENTIAL,SECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertEquals(next.length,0); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Two-phase delete check on doPuts() data. Phase 1: addColumn() at ts=125 scoped to
// SECRET&TOPSECRET removes only that version — versions 127, 126, 124, 123 of row1 remain.
// Phase 2: addFamily() up to ts=124 scoped to (CONFIDENTIAL&PRIVATE)|(TOPSECRET&SECRET)
// removes the older versions — only 127 and 126 of row1 remain, plus row2 at ts=127.
@Test public void testDeleteColumnAndDeleteFamilylSpecificTimeStampWithMulipleVersion() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam,qual,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); 
assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&"+ PRIVATE+ ")|("+ TOPSECRET+ "&"+ SECRET+ ")")); d.addFamily(fam,124l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); cellScanner=next[0].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); assertEquals(current.getTimestamp(),127l); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies that an addColumns() delete at ts<=125 scoped to SECRET&TOPSECRET affects only
// the targeted qualifier of doPutsWithDiffCols() data: qual keeps versions 124 and 123,
// while the sibling qualifiers qual1 (ts=126) and qual2 (ts=127) are untouched.
@Test public void testDeleteColumnswithMultipleColumnsWithMultipleVersions() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPutsWithDiffCols(tableName)){ TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumns(fam,qual,125l); try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); TEST_UTIL.getHBaseAdmin().flush(tableName); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 1); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),124l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertTrue(Bytes.equals(current.getQualifierArray(),current.getQualifierOffset(),current.getQualifierLength(),qual1,0,qual1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); 
assertEquals(current.getTimestamp(),127l); assertTrue(Bytes.equals(current.getQualifierArray(),current.getQualifierOffset(),current.getQualifierLength(),qual2,0,qual2.length)); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Applies two different delete types to the same cell under the same compound visibility
// expression. Phase 1: addColumns() at ts<=125 removes only the matching version 124 —
// versions 127, 126, 125, 123 of row1 remain, plus row2. Phase 2: addColumn() at ts=127
// removes the newest version — 126, 125, 123 remain, plus row2.
@Test public void testDiffDeleteTypesForTheSameCellUsingMultipleVersions() throws Exception { setAuths(); final TableName tableName=TableName.valueOf(TEST_NAME.getMethodName()); try (Table table=doPuts(tableName)){ PrivilegedExceptionAction actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&"+ CONFIDENTIAL+ ")|("+ TOPSECRET+ "&"+ SECRET+ ")")); d.addColumns(fam,qual,125l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); Scan s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); ResultScanner scanner=table.getScanner(s); Result[] next=scanner.next(3); assertTrue(next.length == 2); CellScanner cellScanner=next[0].cellScanner(); cellScanner.advance(); Cell current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),127l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); 
assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); actiona=new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try (Connection connection=ConnectionFactory.createConnection(conf);Table table=connection.getTable(tableName)){ Delete d=new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&"+ PRIVATE+ ")|("+ TOPSECRET+ "&"+ SECRET+ ")")); d.addColumn(fam,qual,127l); table.delete(d); } catch ( Throwable t) { throw new IOException(t); } return null; } } ; SUPERUSER.runAs(actiona); s=new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET,PRIVATE,CONFIDENTIAL,TOPSECRET)); scanner=table.getScanner(s); next=scanner.next(3); assertTrue(next.length == 2); cellScanner=next[0].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),126l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),125l); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row1,0,row1.length)); assertEquals(current.getTimestamp(),123l); cellScanner=next[1].cellScanner(); cellScanner.advance(); current=cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(),current.getRowOffset(),current.getRowLength(),row2,0,row2.length)); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Deletes a family first with a matching visibility expression and then with no
 * expression at all, verifying that a superuser scan sees zero rows after each delete.
 */
@Test
public void testDeleteFamiliesWithAndWithoutVisibilityLabels() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(new HColumnDescriptor(fam));
  hBaseAdmin.createTable(tableDesc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    // One CONFIDENTIAL cell in the row.
    Put put = new Put(row1);
    put.addColumn(fam, qual, value);
    put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    table.put(put);

    // Family delete scoped to the matching CONFIDENTIAL expression.
    Delete labeledDelete = new Delete(row1);
    labeledDelete.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    labeledDelete.addFamily(fam);
    table.delete(labeledDelete);

    PrivilegedExceptionAction expectEmptyScan = new PrivilegedExceptionAction() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Scan s = new Scan();
          ResultScanner scanner = table.getScanner(s);
          Result[] next = scanner.next(3);
          assertEquals(next.length, 0);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(expectEmptyScan);

    // Family delete with no visibility expression.
    Delete plainDelete = new Delete(row1);
    plainDelete.addFamily(fam);
    table.delete(plainDelete);

    expectEmptyScan = new PrivilegedExceptionAction() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(tableName)) {
          Scan s = new Scan();
          ResultScanner scanner = table.getScanner(s);
          Result[] next = scanner.next(3);
          assertEquals(next.length, 0);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(expectEmptyScan);
  }
}

Class: org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithSLGStack

InternalCallVerifier NullVerifier 
/**
 * Verifies that the ScanLabelGenerator stack can filter out an authorization label:
 * with CONFIDENTIAL filtered by LabelFilteringScanLabelGenerator, the SECRET cell (Q1)
 * is returned but the CONFIDENTIAL cell (Q2) is not, even though the scan requested
 * both labels.
 */
@Test
public void testWithSAGStack() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = TEST_UTIL.createTable(tableName, CF)) {
    Put put = new Put(ROW_1);
    put.addColumn(CF, Q1, HConstants.LATEST_TIMESTAMP, value);
    put.setCellVisibility(new CellVisibility(SECRET));
    table.put(put);
    put = new Put(ROW_1);
    put.addColumn(CF, Q2, HConstants.LATEST_TIMESTAMP, value);
    put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    table.put(put);
    LabelFilteringScanLabelGenerator.labelToFilter = CONFIDENTIAL;
    try {
      Scan s = new Scan();
      s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
      // Close the scanner instead of leaking it.
      try (ResultScanner scanner = table.getScanner(s)) {
        Result next = scanner.next();
        // Fail with a clear assertion rather than an NPE if no row comes back.
        assertNotNull(next);
        assertNotNull(next.getColumnLatestCell(CF, Q1));
        assertNull(next.getColumnLatestCell(CF, Q2));
      }
    } finally {
      // Reset the static filter so it does not leak into other tests in the JVM.
      LabelFilteringScanLabelGenerator.labelToFilter = null;
    }
  }
}

Class: org.apache.hadoop.hbase.security.visibility.TestVisibilityLablesWithGroups

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that visibility auths granted to a group ("@testgroup") control what a
 * member of that group can read, and that clearing the group auth revokes it.
 * Setup: ROW_1 gets Q1/value1 labelled SECRET, Q2/value2 labelled CONFIDENTIAL,
 * and Q3/value3 with no label (visible to everyone).
 */
@Test
public void testGroupAuths() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  TEST_UTIL.createTable(tableName, CF);
  // Load the three labelled/unlabelled versions of ROW_1.
  SUPERUSER.runAs(new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table table = connection.getTable(tableName)) {
        Put put = new Put(ROW_1);
        put.addColumn(CF, Q1, HConstants.LATEST_TIMESTAMP, value1);
        put.setCellVisibility(new CellVisibility(SECRET));
        table.put(put);
        put = new Put(ROW_1);
        put.addColumn(CF, Q2, HConstants.LATEST_TIMESTAMP, value2);
        put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        table.put(put);
        put = new Put(ROW_1);
        put.addColumn(CF, Q3, HConstants.LATEST_TIMESTAMP, value3);
        table.put(put);
      }
      return null;
    }
  });
  // The superuser sees every cell regardless of labels.
  SUPERUSER.runAs(new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table table = connection.getTable(tableName)) {
        Scan s = new Scan();
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(1);
        assertTrue(next.length == 1);
        CellScanner cellScanner = next[0].cellScanner();
        cellScanner.advance();
        assertCellIs(cellScanner.current(), Q1, value1);
        cellScanner.advance();
        assertCellIs(cellScanner.current(), Q2, value2);
        cellScanner.advance();
        assertCellIs(cellScanner.current(), Q3, value3);
      }
      return null;
    }
  });
  // The group currently has exactly one auth: CONFIDENTIAL.
  SUPERUSER.runAs(new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      GetAuthsResponse authsResponse = null;
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        authsResponse = VisibilityClient.getAuths(conn, "@testgroup");
      } catch (Throwable e) {
        fail("Should not have failed");
      }
      List<String> authsList = new ArrayList<>();
      for (ByteString authBS : authsResponse.getAuthList()) {
        authsList.add(Bytes.toString(authBS.toByteArray()));
      }
      assertEquals(1, authsList.size());
      assertTrue(authsList.contains(CONFIDENTIAL));
      return null;
    }
  });
  // A group member sees CONFIDENTIAL (via the group auth) and unlabelled cells,
  // but never SECRET cells -- even when SECRET is explicitly requested.
  TESTUSER.runAs(new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table table = connection.getTable(tableName)) {
        // Default scan: the group auth applies implicitly.
        Scan s = new Scan();
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(1);
        assertTrue(next.length == 1);
        CellScanner cellScanner = next[0].cellScanner();
        cellScanner.advance();
        assertCellIs(cellScanner.current(), Q2, value2);
        cellScanner.advance();
        assertCellIs(cellScanner.current(), Q3, value3);
        // Requesting SECRET+CONFIDENTIAL: SECRET is not granted, so Q1 stays hidden.
        Scan s1 = new Scan();
        s1.setAuthorizations(new Authorizations(new String[] { SECRET, CONFIDENTIAL }));
        ResultScanner scanner1 = table.getScanner(s1);
        Result[] next1 = scanner1.next(1);
        assertTrue(next1.length == 1);
        CellScanner cellScanner1 = next1[0].cellScanner();
        cellScanner1.advance();
        assertCellIs(cellScanner1.current(), Q2, value2);
        cellScanner1.advance();
        assertCellIs(cellScanner1.current(), Q3, value3);
        // Requesting only SECRET: neither label usable, only the unlabelled cell shows.
        Scan s2 = new Scan();
        s2.setAuthorizations(new Authorizations(new String[] { SECRET }));
        ResultScanner scanner2 = table.getScanner(s2);
        Result next2 = scanner2.next();
        CellScanner cellScanner2 = next2.cellScanner();
        cellScanner2.advance();
        assertCellIs(cellScanner2.current(), Q3, value3);
        assertFalse(cellScanner2.advance());
      }
      return null;
    }
  });
  // Revoke the group's CONFIDENTIAL auth.
  SUPERUSER.runAs(new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        VisibilityClient.clearAuths(conn, new String[] { CONFIDENTIAL }, "@testgroup");
      } catch (Throwable e) {
        fail("Should not have failed");
      }
      return null;
    }
  });
  // The group now has no auths at all.
  SUPERUSER.runAs(new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      GetAuthsResponse authsResponse = null;
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        authsResponse = VisibilityClient.getAuths(conn, "@testgroup");
      } catch (Throwable e) {
        fail("Should not have failed");
      }
      List<String> authsList = new ArrayList<>();
      for (ByteString authBS : authsResponse.getAuthList()) {
        authsList.add(Bytes.toString(authBS.toByteArray()));
      }
      assertEquals(0, authsList.size());
      return null;
    }
  });
  // With the group auth gone, only the unlabelled cell remains visible.
  TESTUSER.runAs(new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf);
          Table table = connection.getTable(tableName)) {
        Scan s1 = new Scan();
        s1.setAuthorizations(new Authorizations(new String[] { SECRET, CONFIDENTIAL }));
        ResultScanner scanner1 = table.getScanner(s1);
        Result[] next1 = scanner1.next(1);
        assertTrue(next1.length == 1);
        CellScanner cellScanner1 = next1[0].cellScanner();
        cellScanner1.advance();
        assertCellIs(cellScanner1.current(), Q3, value3);
        assertFalse(cellScanner1.advance());
      }
      return null;
    }
  });
}

/** Asserts that {@code cell} belongs to ROW_1 and carries the given qualifier and value. */
private void assertCellIs(Cell cell, byte[] qualifier, byte[] value) {
  assertTrue(Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
      ROW_1, 0, ROW_1.length));
  assertTrue(Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(),
      cell.getQualifierLength(), qualifier, 0, qualifier.length));
  assertTrue(Bytes.equals(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(),
      value, 0, value.length));
}

Class: org.apache.hadoop.hbase.security.visibility.TestWithDisabledAuthorization

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=180000) public void testManageUserAuths() throws Throwable { SUPERUSER.runAs(new PrivilegedExceptionAction(){ public Void run() throws Exception { try (Connection conn=ConnectionFactory.createConnection(conf)){ VisibilityClient.setAuths(conn,new String[]{SECRET,CONFIDENTIAL},USER_RW.getShortName()); } catch ( Throwable t) { fail("Should not have failed"); } return null; } } ); PrivilegedExceptionAction> getAuths=new PrivilegedExceptionAction>(){ public List run() throws Exception { GetAuthsResponse authsResponse=null; try (Connection conn=ConnectionFactory.createConnection(conf)){ authsResponse=VisibilityClient.getAuths(conn,USER_RW.getShortName()); } catch ( Throwable t) { fail("Should not have failed"); } List authsList=new ArrayList(); for ( ByteString authBS : authsResponse.getAuthList()) { authsList.add(Bytes.toString(authBS.toByteArray())); } return authsList; } } ; List authsList=SUPERUSER.runAs(getAuths); assertEquals(2,authsList.size()); assertTrue(authsList.contains(SECRET)); assertTrue(authsList.contains(CONFIDENTIAL)); SUPERUSER.runAs(new PrivilegedExceptionAction(){ public Void run() throws Exception { try (Connection conn=ConnectionFactory.createConnection(conf)){ VisibilityClient.clearAuths(conn,new String[]{SECRET},USER_RW.getShortName()); } catch ( Throwable t) { fail("Should not have failed"); } return null; } } ); authsList=SUPERUSER.runAs(getAuths); assertEquals(1,authsList.size()); assertTrue(authsList.contains(CONFIDENTIAL)); SUPERUSER.runAs(new PrivilegedExceptionAction(){ public Void run() throws Exception { try (Connection conn=ConnectionFactory.createConnection(conf)){ VisibilityClient.clearAuths(conn,new String[]{CONFIDENTIAL},USER_RW.getShortName()); } catch ( Throwable t) { fail("Should not have failed"); } return null; } } ); authsList=SUPERUSER.runAs(getAuths); assertEquals(0,authsList.size()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * With authorization disabled, visibility expressions are passive: every scan
 * returns all four rows no matter which authorizations are supplied.
 */
@Test(timeout = 180000)
public void testPassiveVisibility() throws Exception {
  try (Table t = createTableAndWriteDataWithLabels(
      TableName.valueOf(TEST_NAME.getMethodName()), SECRET, PRIVATE,
      SECRET + "|" + CONFIDENTIAL, PRIVATE + "|" + CONFIDENTIAL)) {
    // No auths at all.
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations());
    try (ResultScanner scanner = t.getScanner(s)) {
      Result[] next = scanner.next(10);
      // assertEquals(expected, actual): all 4 rows visible regardless of auths.
      assertEquals(4, next.length);
    }
    // One auth.
    s = new Scan();
    s.setAuthorizations(new Authorizations(SECRET));
    try (ResultScanner scanner = t.getScanner(s)) {
      Result[] next = scanner.next(10);
      assertEquals(4, next.length);
    }
    // Two auths.
    s = new Scan();
    s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
    try (ResultScanner scanner = t.getScanner(s)) {
      Result[] next = scanner.next(10);
      assertEquals(4, next.length);
    }
    // All three auths.
    s = new Scan();
    s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE));
    try (ResultScanner scanner = t.getScanner(s)) {
      Result[] next = scanner.next(10);
      assertEquals(4, next.length);
    }
  }
}

Class: org.apache.hadoop.hbase.snapshot.TestFlushSnapshotFromClient

InternalCallVerifier EqualityVerifier 
/**
 * Takes a flush snapshot, merges two pairs of regions, and verifies that clones
 * made from the snapshot both before and after the merge contain all rows.
 */
@Test(timeout = 300000)
public void testSnapshotStateAfterMerge() throws Exception {
  int numRows = DEFAULT_NUM_ROWS;
  Admin admin = UTIL.getHBaseAdmin();
  SnapshotTestingUtils.assertNoSnapshots(admin);
  SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);
  String snapshotBeforeMergeName = "snapshotBeforeMerge";
  admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotDescription.Type.FLUSH);
  // Clone from the snapshot while the table still has its original regions.
  TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge");
  admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName);
  SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneBeforeMergeName);
  // Sort regions by start key so the merge picks stable, adjacent pairs.
  List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
  Collections.sort(regions, new Comparator<HRegionInfo>() {
    public int compare(HRegionInfo r1, HRegionInfo r2) {
      return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
    }
  });
  int numRegions = admin.getTableRegions(TABLE_NAME).size();
  int numRegionsAfterMerge = numRegions - 2;  // two merges, each removes one region
  admin.mergeRegions(regions.get(1).getEncodedNameAsBytes(),
      regions.get(2).getEncodedNameAsBytes(), true);
  admin.mergeRegions(regions.get(5).getEncodedNameAsBytes(),
      regions.get(6).getEncodedNameAsBytes(), true);
  waitRegionsAfterMerge(numRegionsAfterMerge);
  assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
  // Clone again from the pre-merge snapshot; it must still restore cleanly.
  TableName cloneAfterMergeName = TableName.valueOf("cloneAfterMerge");
  admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName);
  SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneAfterMergeName);
  verifyRowCount(UTIL, TABLE_NAME, numRows);
  verifyRowCount(UTIL, cloneBeforeMergeName, numRows);
  verifyRowCount(UTIL, cloneAfterMergeName, numRows);
  UTIL.deleteTable(cloneAfterMergeName);
  UTIL.deleteTable(cloneBeforeMergeName);
}

InternalCallVerifier EqualityVerifier 
/**
 * Merges two pairs of regions first, then takes a flush snapshot and verifies a
 * clone of that snapshot contains all rows.
 */
@Test(timeout = 300000)
public void testTakeSnapshotAfterMerge() throws Exception {
  int numRows = DEFAULT_NUM_ROWS;
  Admin admin = UTIL.getHBaseAdmin();
  SnapshotTestingUtils.assertNoSnapshots(admin);
  SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);
  // Sort regions by start key so the merge picks stable, adjacent pairs.
  List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
  Collections.sort(regions, new Comparator<HRegionInfo>() {
    public int compare(HRegionInfo r1, HRegionInfo r2) {
      return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
    }
  });
  int numRegions = admin.getTableRegions(TABLE_NAME).size();
  int numRegionsAfterMerge = numRegions - 2;  // two merges, each removes one region
  admin.mergeRegions(regions.get(1).getEncodedNameAsBytes(),
      regions.get(2).getEncodedNameAsBytes(), true);
  admin.mergeRegions(regions.get(5).getEncodedNameAsBytes(),
      regions.get(6).getEncodedNameAsBytes(), true);
  waitRegionsAfterMerge(numRegionsAfterMerge);
  assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
  // Snapshot the merged table and verify a clone restores every row.
  String snapshotName = "snapshotAfterMerge";
  SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME.getNameAsString(),
      SnapshotDescription.Type.FLUSH, 3);
  TableName cloneName = TableName.valueOf("cloneMerge");
  admin.cloneSnapshot(snapshotName, cloneName);
  SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneName);
  verifyRowCount(UTIL, TABLE_NAME, numRows);
  verifyRowCount(UTIL, cloneName, numRows);
  UTIL.deleteTable(cloneName);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Demonstrate that we reject snapshot requests if there is a snapshot already
 * running on the same table, and that concurrent snapshots on different tables
 * can both succeed concurrently.
 */
@Test(timeout = 300000)
public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException {
  final TableName TABLE2_NAME = TableName.valueOf(TABLE_NAME + "2");
  int ssNum = 20;
  Admin admin = UTIL.getHBaseAdmin();
  SnapshotTestingUtils.assertNoSnapshots(admin);
  SnapshotTestingUtils.createTable(UTIL, TABLE2_NAME, TEST_FAM);
  SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
  SnapshotTestingUtils.loadData(UTIL, TABLE2_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
  final CountDownLatch toBeSubmitted = new CountDownLatch(ssNum);
  // Submits one async snapshot request and counts down once it has been sent;
  // rejections are expected and tolerated under contention.
  class SSRunnable implements Runnable {
    SnapshotDescription ss;

    SSRunnable(SnapshotDescription ss) {
      this.ss = ss;
    }

    @Override
    public void run() {
      try {
        Admin admin = UTIL.getHBaseAdmin();
        LOG.info("Submitting snapshot request: "
            + ClientSnapshotDescriptionUtils.toString(ss));
        admin.takeSnapshotAsync(ss);
      } catch (Exception e) {
        LOG.info("Exception during snapshot request: "
            + ClientSnapshotDescriptionUtils.toString(ss) + ". This is ok, we expect some", e);
      }
      LOG.info("Submitted snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
      toBeSubmitted.countDown();
    }
  }
  // Build the 20 requests, alternating between the two tables.
  SnapshotDescription[] descs = new SnapshotDescription[ssNum];
  for (int i = 0; i < ssNum; i++) {
    SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
    builder.setTable(((i % 2) == 0 ? TABLE_NAME : TABLE2_NAME).getNameAsString());
    builder.setName("ss" + i);
    builder.setType(SnapshotDescription.Type.FLUSH);
    descs[i] = builder.build();
  }
  // Fire all requests concurrently and wait for submission.
  for (int i = 0; i < ssNum; i++) {
    new Thread(new SSRunnable(descs[i])).start();
  }
  toBeSubmitted.await();
  // Poll until every snapshot has finished or failed.
  while (true) {
    int doneCount = 0;
    for (SnapshotDescription ss : descs) {
      try {
        if (admin.isSnapshotFinished(ss)) {
          doneCount++;
        }
      } catch (Exception e) {
        LOG.warn("Got an exception when checking for snapshot " + ss.getName(), e);
        doneCount++;
      }
    }
    if (doneCount == descs.length) {
      break;
    }
    Thread.sleep(100);
  }
  UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
  List<SnapshotDescription> taken = admin.listSnapshots();
  int takenSize = taken.size();
  LOG.info("Taken " + takenSize + " snapshots: " + taken);
  // Some requests must have been rejected, but not all of them.
  assertTrue("We expect at least 1 request to be rejected because of we concurrently"
      + " issued many requests", takenSize < ssNum && takenSize > 0);
  // Each table must have produced at least one successful snapshot.
  int t1SnapshotsCount = 0;
  int t2SnapshotsCount = 0;
  for (SnapshotDescription ss : taken) {
    if (TableName.valueOf(ss.getTable()).equals(TABLE_NAME)) {
      t1SnapshotsCount++;
    } else if (TableName.valueOf(ss.getTable()).equals(TABLE2_NAME)) {
      t2SnapshotsCount++;
    }
  }
  assertTrue("We expect at least 1 snapshot of table1 ", t1SnapshotsCount > 0);
  assertTrue("We expect at least 1 snapshot of table2 ", t2SnapshotsCount > 0);
  UTIL.deleteTable(TABLE2_NAME);
}

Class: org.apache.hadoop.hbase.spark.TestJavaHBaseContext

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * Bulk-deletes rows 1-3 through the Spark HBase context and verifies that rows
 * 4 and 5 (pre-populated mock data) survive.
 */
@Test
public void testBulkDelete() throws IOException {
  List<byte[]> list = new ArrayList<>();
  list.add(Bytes.toBytes("1"));
  list.add(Bytes.toBytes("2"));
  list.add(Bytes.toBytes("3"));
  JavaRDD<byte[]> rdd = jsc.parallelize(list);
  Configuration conf = htu.getConfiguration();
  populateTableWithMockData(conf, TableName.valueOf(tableName));
  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
  hbaseContext.bulkDelete(rdd, TableName.valueOf(tableName),
      new JavaHBaseBulkDeleteExample.DeleteFunction(), 2);
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(TableName.valueOf(tableName))) {
    Result result1 = table.get(new Get(Bytes.toBytes("1")));
    Assert.assertNull("Row 1 should have been deleted", result1.getRow());
    Result result2 = table.get(new Get(Bytes.toBytes("2")));
    Assert.assertNull("Row 2 should have been deleted", result2.getRow());
    Result result3 = table.get(new Get(Bytes.toBytes("3")));
    Assert.assertNull("Row 3 should have been deleted", result3.getRow());
    // Rows 4 and 5 were not in the delete RDD and must remain.
    Result result4 = table.get(new Get(Bytes.toBytes("4")));
    Assert.assertNotNull("Row 4 should not have been deleted", result4.getRow());
    Result result5 = table.get(new Get(Bytes.toBytes("5")));
    Assert.assertNotNull("Row 5 should not have been deleted", result5.getRow());
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * Clears rows 1-5, bulk-puts them back through the Spark HBase context, and
 * verifies every row is present afterwards.
 */
@Test
public void testBulkPut() throws IOException {
  List<String> list = new ArrayList<>();
  list.add("1," + columnFamilyStr + ",a,1");
  list.add("2," + columnFamilyStr + ",a,2");
  list.add("3," + columnFamilyStr + ",a,3");
  list.add("4," + columnFamilyStr + ",a,4");
  list.add("5," + columnFamilyStr + ",a,5");
  JavaRDD<String> rdd = jsc.parallelize(list);
  Configuration conf = htu.getConfiguration();
  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
  try (Connection conn = ConnectionFactory.createConnection(conf)) {
    // Remove any leftovers so the asserts below actually prove the bulk put worked.
    try (Table table = conn.getTable(TableName.valueOf(tableName))) {
      List<Delete> deletes = new ArrayList<>();
      for (int i = 1; i < 6; i++) {
        deletes.add(new Delete(Bytes.toBytes(Integer.toString(i))));
      }
      table.delete(deletes);
    }
    hbaseContext.bulkPut(rdd, TableName.valueOf(tableName), new PutFunction());
    try (Table table = conn.getTable(TableName.valueOf(tableName))) {
      Result result1 = table.get(new Get(Bytes.toBytes("1")));
      Assert.assertNotNull("Row 1 should have been put", result1.getRow());
      Result result2 = table.get(new Get(Bytes.toBytes("2")));
      Assert.assertNotNull("Row 2 should have been put", result2.getRow());
      Result result3 = table.get(new Get(Bytes.toBytes("3")));
      Assert.assertNotNull("Row 3 should have been put", result3.getRow());
      Result result4 = table.get(new Get(Bytes.toBytes("4")));
      Assert.assertNotNull("Row 4 should have been put", result4.getRow());
      Result result5 = table.get(new Get(Bytes.toBytes("5")));
      Assert.assertNotNull("Row 5 should have been put", result5.getRow());
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Bulk-gets five pre-populated rows through the Spark HBase context and checks
 * that the resulting RDD has one element per requested row.
 */
@Test
public void testBulkGet() throws IOException {
  List<byte[]> list = new ArrayList<>();
  list.add(Bytes.toBytes("1"));
  list.add(Bytes.toBytes("2"));
  list.add(Bytes.toBytes("3"));
  list.add(Bytes.toBytes("4"));
  list.add(Bytes.toBytes("5"));
  JavaRDD<byte[]> rdd = jsc.parallelize(list);
  Configuration conf = htu.getConfiguration();
  populateTableWithMockData(conf, TableName.valueOf(tableName));
  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
  final JavaRDD<String> stringJavaRDD = hbaseContext.bulkGet(TableName.valueOf(tableName), 2,
      rdd, new GetFunction(), new ResultFunction());
  // assertEquals(expected, actual): one result per requested row key.
  Assert.assertEquals(5, stringJavaRDD.count());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Runs a distributed scan over the mock table via hbaseRDD and checks that all
 * five pre-populated rows come back.
 */
@Test
public void testDistributedScan() throws IOException {
  Configuration conf = htu.getConfiguration();
  populateTableWithMockData(conf, TableName.valueOf(tableName));
  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
  Scan scan = new Scan();
  scan.setCaching(100);
  JavaRDD<String> javaRdd = hbaseContext.hbaseRDD(TableName.valueOf(tableName), scan)
      .map(new ScanConvertFunction());
  List<String> results = javaRdd.collect();
  // assertEquals(expected, actual): all five mock rows are scanned.
  Assert.assertEquals(5, results.size());
}

Class: org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Runs a single generate/verify loop of the big-linked-list tool and expects a
 * clean (zero) exit code.
 */
@Test
public void testContinuousIngest() throws Exception {
  Configuration conf = getTestingUtil(getConf()).getConfiguration();
  if (isMultiUnevenColumnFamilies(getConf())) {
    // Uneven column families require the large-stores flush policy.
    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
        FlushLargeStoresPolicy.class.getName());
  }
  // Args: num iterations, num mappers, num nodes, output dir, num reducers.
  int ret = ToolRunner.run(conf, new Loop(), new String[] { "1", "1", "2000000",
      util.getDataTestDirOnTestFS("IntegrationTestBigLinkedList").toString(), "1" });
  org.junit.Assert.assertEquals(0, ret);
}

Class: org.apache.hadoop.hbase.thrift.TestThriftServerCmdLine

BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Starts the Thrift server from its command line with the parameterized options,
 * verifies the expected TServer implementation was instantiated, and round-trips
 * a client call.
 */
@Test(timeout = 600000)
public void testRunThriftServer() throws Exception {
  // Assemble the command line for this parameterized combination.
  List<String> args = new ArrayList<String>();
  if (implType != null) {
    String serverTypeOption = implType.toString();
    assertTrue(serverTypeOption.startsWith("-"));
    args.add(serverTypeOption);
  }
  port = HBaseTestingUtility.randomFreePort();
  args.add("-" + ThriftServer.PORT_OPTION);
  args.add(String.valueOf(port));
  if (specifyFramed) {
    args.add("-" + ThriftServer.FRAMED_OPTION);
  }
  if (specifyBindIP) {
    args.add("-" + ThriftServer.BIND_OPTION);
    args.add(InetAddress.getLocalHost().getHostName());
  }
  if (specifyCompact) {
    args.add("-" + ThriftServer.COMPACT_OPTION);
  }
  args.add("start");
  thriftServer = new ThriftServer(TEST_UTIL.getConfiguration());
  startCmdLineThread(args.toArray(new String[args.size()]));
  // Wait up to ~10s for the server thread to instantiate its TServer.
  for (int i = 0; i < 100
      && (thriftServer.serverRunner == null || thriftServer.serverRunner.tserver == null); i++) {
    Thread.sleep(100);
  }
  Class<?> expectedClass = implType != null
      ? implType.serverClass : TBoundedThreadPoolServer.class;
  assertEquals(expectedClass, thriftServer.serverRunner.tserver.getClass());
  try {
    talkToThriftServer();
  } catch (Exception ex) {
    clientSideException = ex;
  } finally {
    stopCmdLineThread();
  }
  if (clientSideException != null) {
    LOG.error("Thrift client threw an exception. Parameters:" + getParametersString(),
        clientSideException);
    throw new Exception(clientSideException);
  }
}

Class: org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandler

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Seeds a counter cell with 1, increments it once via the Thrift handler, and
 * verifies the stored value is now 2.
 */
@Test
public void testIncrement() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testIncrement".getBytes();
  ByteBuffer table = wrap(tableAname);
  // Seed the counter cell with 1.
  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
      wrap(Bytes.toBytes(1L))));
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  handler.put(table, put);
  // Increment by the default amount (1).
  List<TColumnIncrement> incrementColumns = new ArrayList<>();
  incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
  TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
  handler.increment(table, increment);
  // The cell must now hold 2.
  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  assertEquals(1, result.getColumnValuesSize());
  TColumnValue columnValue = result.getColumnValues().get(0);
  assertArrayEquals(Bytes.toBytes(2L), columnValue.getValue());
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Writes ten rows, scans them with a KeyOnlyFilter (values stripped), verifies
 * the rows and empty values, then checks the closed scanner id is rejected.
 */
@Test
public void testScanWithFilter() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);
  TColumnValue columnValue =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(columnValue);
  for (int i = 0; i < 10; i++) {
    TPut put = new TPut(wrap(("testScanWithFilter" + i).getBytes()), columnValues);
    handler.put(table, put);
  }
  TScan scan = new TScan();
  List<TColumn> columns = new ArrayList<>();
  TColumn column = new TColumn();
  column.setFamily(familyAname);
  column.setQualifier(qualifierAname);
  columns.add(column);
  scan.setColumns(columns);
  scan.setStartRow("testScanWithFilter".getBytes());
  // \uffff makes the stop row sort after every "testScanWithFilter<i>" key.
  scan.setStopRow("testScanWithFilter\uffff".getBytes());
  scan.setFilterString(wrap(("KeyOnlyFilter()").getBytes()));
  int scanId = handler.openScanner(table, scan);
  List<TResult> results = handler.getScannerRows(scanId, 10);
  assertEquals(10, results.size());
  for (int i = 0; i < 10; i++) {
    assertArrayEquals(("testScanWithFilter" + i).getBytes(), results.get(i).getRow());
    // KeyOnlyFilter strips the value payload.
    assertEquals(0, results.get(i).getColumnValues().get(0).getValue().length);
  }
  // Scanner is exhausted.
  results = handler.getScannerRows(scanId, 10);
  assertEquals(0, results.size());
  handler.closeScanner(scanId);
  try {
    handler.getScannerRows(scanId, 10);
    fail("Scanner id should be invalid");
  } catch (TIllegalArgument e) {
    // expected: closed scanner ids must be rejected
  }
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Writes one row with 100 columns and scans it with batch size 10: each
 * getScannerRows call must return one partial row of exactly 10 columns, in
 * column order, until the row is exhausted.
 */
@Test
public void testScanWithBatchSize() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);
  List<TColumnValue> columnValues = new ArrayList<>();
  for (int i = 0; i < 100; i++) {
    String colNum = pad(i, (byte) 3);
    TColumnValue columnValue = new TColumnValue(wrap(familyAname),
        wrap(("col" + colNum).getBytes()), wrap(("val" + colNum).getBytes()));
    columnValues.add(columnValue);
  }
  TPut put = new TPut(wrap(("testScanWithBatchSize").getBytes()), columnValues);
  handler.put(table, put);
  TScan scan = new TScan();
  List<TColumn> columns = new ArrayList<>();
  TColumn column = new TColumn();
  column.setFamily(familyAname);
  columns.add(column);
  scan.setColumns(columns);
  scan.setStartRow("testScanWithBatchSize".getBytes());
  scan.setStopRow("testScanWithBatchSize\uffff".getBytes());
  // Batching splits the 100-column row into partial results of 10 columns.
  scan.setBatchSize(10);
  int scanId = handler.openScanner(table, scan);
  List<TResult> results = null;
  for (int i = 0; i < 10; i++) {
    results = handler.getScannerRows(scanId, 1);
    assertEquals(1, results.size());
    List<TColumnValue> cols = results.get(0).getColumnValues();
    assertEquals(10, cols.size());
    for (int y = 0; y < 10; y++) {
      int colNum = y + (10 * i);
      String colNumPad = pad(colNum, (byte) 3);
      assertArrayEquals(("col" + colNumPad).getBytes(), cols.get(y).getQualifier());
    }
  }
  // Row fully consumed.
  results = handler.getScannerRows(scanId, 1);
  assertEquals(0, results.size());
  handler.closeScanner(scanId);
  try {
    handler.getScannerRows(scanId, 1);
    fail("Scanner id should be invalid");
  } catch (TIllegalArgument e) {
    // expected: closed scanner ids must be rejected
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Writes two timestamped versions of a cell, deletes the column with
 * DELETE_COLUMNS (all versions), and verifies nothing remains.
 */
@Test
public void testDeleteAllTimestamps() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testDeleteAllTimestamps".getBytes();
  ByteBuffer table = wrap(tableAname);
  List<TColumnValue> columnValues = new ArrayList<>();
  TColumnValue columnValueA =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  // Two puts at distinct timestamps produce two versions of the same cell.
  columnValueA.setTimestamp(System.currentTimeMillis() - 10);
  columnValues.add(columnValueA);
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  handler.put(table, put);
  columnValueA.setTimestamp(System.currentTimeMillis());
  handler.put(table, put);
  TGet get = new TGet(wrap(rowName));
  get.setMaxVersions(2);
  TResult result = handler.get(table, get);
  assertEquals(2, result.getColumnValuesSize());
  // DELETE_COLUMNS removes every version of the column.
  TDelete delete = new TDelete(wrap(rowName));
  List<TColumn> deleteColumns = new ArrayList<>();
  TColumn deleteColumn = new TColumn(wrap(familyAname));
  deleteColumn.setQualifier(qualifierAname);
  deleteColumns.add(deleteColumn);
  delete.setColumns(deleteColumns);
  delete.setDeleteType(TDeleteType.DELETE_COLUMNS);
  handler.deleteSingle(table, delete);
  get = new TGet(wrap(rowName));
  result = handler.get(table, get);
  assertNull(result.getRow());
  assertEquals(0, result.getColumnValuesSize());
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Writes 20 rows, then exercises getScannerResults with varying stop rows and row limits
 * (5, 10, 20), checking that exactly the expected rows come back in order. Finally runs the
 * same scan reversed and checks the rows come back in descending order.
 */
@Test public void testGetScannerResults() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);

  TColumnValue columnValue =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(columnValue);
  for (int i = 0; i < 20; i++) {
    TPut put =
        new TPut(wrap(("testGetScannerResults" + pad(i, (byte) 2)).getBytes()), columnValues);
    handler.put(table, put);
  }

  TScan scan = new TScan();
  List<TColumn> columns = new ArrayList<>();
  TColumn column = new TColumn();
  column.setFamily(familyAname);
  column.setQualifier(qualifierAname);
  columns.add(column);
  scan.setColumns(columns);
  scan.setStartRow("testGetScannerResults".getBytes());

  // Stop row "05" is exclusive: rows 00..04 are in range, limit is 5.
  scan.setStopRow("testGetScannerResults05".getBytes());
  List<TResult> results = handler.getScannerResults(table, scan, 5);
  assertEquals(5, results.size());
  for (int i = 0; i < 5; i++) {
    assertArrayEquals(("testGetScannerResults" + pad(i, (byte) 2)).getBytes(),
        results.get(i).getRow());
  }

  scan.setStopRow("testGetScannerResults10".getBytes());
  results = handler.getScannerResults(table, scan, 10);
  assertEquals(10, results.size());
  for (int i = 0; i < 10; i++) {
    assertArrayEquals(("testGetScannerResults" + pad(i, (byte) 2)).getBytes(),
        results.get(i).getRow());
  }

  scan.setStopRow("testGetScannerResults20".getBytes());
  results = handler.getScannerResults(table, scan, 20);
  assertEquals(20, results.size());
  for (int i = 0; i < 20; i++) {
    assertArrayEquals(("testGetScannerResults" + pad(i, (byte) 2)).getBytes(),
        results.get(i).getRow());
  }

  // Reversed scan: start at the highest row, expect descending order.
  scan = new TScan();
  scan.setColumns(columns);
  scan.setReversed(true);
  scan.setStartRow("testGetScannerResults20".getBytes());
  scan.setStopRow("testGetScannerResults".getBytes());
  results = handler.getScannerResults(table, scan, 20);
  assertEquals(20, results.size());
  for (int i = 0; i < 20; i++) {
    assertArrayEquals(("testGetScannerResults" + pad(19 - i, (byte) 2)).getBytes(),
        results.get(i).getRow());
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Put valueA to a row, make sure put has happened, then create a mutation object to put valueB
 * and delete valueA atomically via mutateRow, then check that the row value is only valueB.
 * @throws Exception
 */
@Test public void testMutateRow() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testMutateRow".getBytes();
  ByteBuffer table = wrap(tableAname);

  // Seed the row with columnValueA and verify it landed.
  List<TColumnValue> columnValuesA = new ArrayList<>();
  TColumnValue columnValueA =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  columnValuesA.add(columnValueA);
  TPut putA = new TPut(wrap(rowName), columnValuesA);
  putA.setColumnValues(columnValuesA);
  handler.put(table, putA);

  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  List<TColumnValue> returnedColumnValues = result.getColumnValues();
  List<TColumnValue> expectedColumnValues = new ArrayList<>();
  expectedColumnValues.add(columnValueA);
  assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);

  // Build a row mutation that puts columnValueB and deletes columnValueA.
  List<TColumnValue> columnValuesB = new ArrayList<>();
  TColumnValue columnValueB =
      new TColumnValue(wrap(familyAname), wrap(qualifierBname), wrap(valueBname));
  columnValuesB.add(columnValueB);
  TPut putB = new TPut(wrap(rowName), columnValuesB);
  putB.setColumnValues(columnValuesB);

  TDelete delete = new TDelete(wrap(rowName));
  List<TColumn> deleteColumns = new ArrayList<>();
  TColumn deleteColumn = new TColumn(wrap(familyAname));
  deleteColumn.setQualifier(qualifierAname);
  deleteColumns.add(deleteColumn);
  delete.setColumns(deleteColumns);

  List<TMutation> mutations = new ArrayList<>();
  TMutation mutationA = TMutation.put(putB);
  mutations.add(mutationA);
  TMutation mutationB = TMutation.deleteSingle(delete);
  mutations.add(mutationB);
  TRowMutations tRowMutations = new TRowMutations(wrap(rowName), mutations);
  handler.mutateRow(table, tRowMutations);

  // After the atomic mutation only columnValueB should remain.
  result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  returnedColumnValues = result.getColumnValues();
  expectedColumnValues = new ArrayList<>();
  expectedColumnValues.add(columnValueB);
  assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Create TPut, TDelete, TIncrement objects, set durability then call ThriftUtility
 * functions to get Put, Delete and Increment respectively. Use getDurability to make sure
 * the returned objects have the appropriate durability setting.
 * Note: JUnit assertEquals takes (expected, actual) — arguments ordered accordingly.
 * @throws Exception
 */
@Test public void testDurability() throws Exception {
  byte[] rowName = "testDurability".getBytes();
  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
  List<TColumnIncrement> incrementColumns = new ArrayList<>();
  incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));

  // Each TDurability value must map to the corresponding client Durability on Delete.
  TDelete tDelete = new TDelete(wrap(rowName));
  tDelete.setDurability(TDurability.SKIP_WAL);
  Delete delete = deleteFromThrift(tDelete);
  assertEquals(Durability.SKIP_WAL, delete.getDurability());
  tDelete.setDurability(TDurability.ASYNC_WAL);
  delete = deleteFromThrift(tDelete);
  assertEquals(Durability.ASYNC_WAL, delete.getDurability());
  tDelete.setDurability(TDurability.SYNC_WAL);
  delete = deleteFromThrift(tDelete);
  assertEquals(Durability.SYNC_WAL, delete.getDurability());
  tDelete.setDurability(TDurability.FSYNC_WAL);
  delete = deleteFromThrift(tDelete);
  assertEquals(Durability.FSYNC_WAL, delete.getDurability());

  // Same mapping for Put.
  TPut tPut = new TPut(wrap(rowName), columnValues);
  tPut.setDurability(TDurability.SKIP_WAL);
  Put put = putFromThrift(tPut);
  assertEquals(Durability.SKIP_WAL, put.getDurability());
  tPut.setDurability(TDurability.ASYNC_WAL);
  put = putFromThrift(tPut);
  assertEquals(Durability.ASYNC_WAL, put.getDurability());
  tPut.setDurability(TDurability.SYNC_WAL);
  put = putFromThrift(tPut);
  assertEquals(Durability.SYNC_WAL, put.getDurability());
  tPut.setDurability(TDurability.FSYNC_WAL);
  put = putFromThrift(tPut);
  assertEquals(Durability.FSYNC_WAL, put.getDurability());

  // Same mapping for Increment.
  TIncrement tIncrement = new TIncrement(wrap(rowName), incrementColumns);
  tIncrement.setDurability(TDurability.SKIP_WAL);
  Increment increment = incrementFromThrift(tIncrement);
  assertEquals(Durability.SKIP_WAL, increment.getDurability());
  tIncrement.setDurability(TDurability.ASYNC_WAL);
  increment = incrementFromThrift(tIncrement);
  assertEquals(Durability.ASYNC_WAL, increment.getDurability());
  tIncrement.setDurability(TDurability.SYNC_WAL);
  increment = incrementFromThrift(tIncrement);
  assertEquals(Durability.SYNC_WAL, increment.getDurability());
  tIncrement.setDurability(TDurability.FSYNC_WAL);
  increment = incrementFromThrift(tIncrement);
  assertEquals(Durability.FSYNC_WAL, increment.getDurability());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies checkAndMutate: the mutation must NOT apply while the guard cell is absent, and
 * must apply once the guard cell exists with the expected value.
 */
@Test public void testCheckAndMutate() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);
  ByteBuffer row = wrap("row".getBytes());
  ByteBuffer family = wrap(familyAname);
  ByteBuffer qualifier = wrap(qualifierAname);
  ByteBuffer value = wrap(valueAname);

  // The guarded mutation: put columnValueB into the row.
  List<TColumnValue> columnValuesB = new ArrayList<>();
  TColumnValue columnValueB = new TColumnValue(family, wrap(qualifierBname), wrap(valueBname));
  columnValuesB.add(columnValueB);
  TPut putB = new TPut(row, columnValuesB);
  putB.setColumnValues(columnValuesB);
  TRowMutations tRowMutations = new TRowMutations(row, Arrays.asList(TMutation.put(putB)));

  // Row is empty, so the EQUAL check against `value` must fail.
  TResult result = handler.get(table, new TGet(row));
  assertEquals(0, result.getColumnValuesSize());
  assertFalse("Expected condition to not pass", handler.checkAndMutate(table, row, family,
      qualifier, TCompareOp.EQUAL, value, tRowMutations));

  // Write the guard cell, after which the same check must pass and apply the mutation.
  List<TColumnValue> columnValuesA = new ArrayList<>();
  TColumnValue columnValueA = new TColumnValue(family, qualifier, value);
  columnValuesA.add(columnValueA);
  handler.put(table, new TPut(row, columnValuesA));
  result = handler.get(table, new TGet(row));
  assertEquals(1, result.getColumnValuesSize());
  assertTColumnValueEqual(columnValueA, result.getColumnValues().get(0));
  assertTrue("Expected condition to pass", handler.checkAndMutate(table, row, family,
      qualifier, TCompareOp.EQUAL, value, tRowMutations));

  result = handler.get(table, new TGet(row));
  assertEquals(2, result.getColumnValuesSize());
  assertTColumnValueEqual(columnValueA, result.getColumnValues().get(0));
  assertTColumnValueEqual(columnValueB, result.getColumnValues().get(1));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Verifies exists(): false before the row is written, true after a put of two column values.
 */
@Test public void testExists() throws TIOError, TException {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testExists".getBytes();
  ByteBuffer table = wrap(tableAname);

  TGet get = new TGet(wrap(rowName));
  assertFalse(handler.exists(table, get));

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
  columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  handler.put(table, put);

  assertTrue(handler.exists(table, get));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Wraps the handler in the metrics-instrumented proxy, performs two exists() calls and one
 * put(), and asserts the corresponding per-method operation counters.
 */
@Test public void testMetrics() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  ThriftMetrics metrics = getMetrics(conf);
  ThriftHBaseServiceHandler hbaseHandler = createHandler();
  // The instrumented proxy records a counter per THBaseService method invocation.
  THBaseService.Iface handler = ThriftHBaseServiceHandler.newInstance(hbaseHandler, metrics);

  byte[] rowName = "testMetrics".getBytes();
  ByteBuffer table = wrap(tableAname);
  TGet get = new TGet(wrap(rowName));
  assertFalse(handler.exists(table, get));

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
  columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  handler.put(table, put);
  assertTrue(handler.exists(table, get));

  // One put and two exists calls were issued above.
  metricsHelper.assertCounter("put_num_ops", 1, metrics.getSource());
  metricsHelper.assertCounter("exists_num_ops", 2, metrics.getSource());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes two rows via putMultiple, deletes both via deleteMultiple, and verifies the returned
 * failure list is empty and neither row exists afterwards.
 */
@Test public void testDeleteMultiple() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);
  byte[] rowName1 = "testDeleteMultiple1".getBytes();
  byte[] rowName2 = "testDeleteMultiple2".getBytes();

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
  columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
  List<TPut> puts = new ArrayList<>();
  puts.add(new TPut(wrap(rowName1), columnValues));
  puts.add(new TPut(wrap(rowName2), columnValues));
  handler.putMultiple(table, puts);

  List<TDelete> deletes = new ArrayList<>();
  deletes.add(new TDelete(wrap(rowName1)));
  deletes.add(new TDelete(wrap(rowName2)));
  // deleteMultiple returns the deletes that FAILED; empty means all succeeded.
  List<TDelete> deleteResults = handler.deleteMultiple(table, deletes);
  assertEquals(0, deleteResults.size());

  assertFalse(handler.exists(table, new TGet(wrap(rowName1))));
  assertFalse(handler.exists(table, new TGet(wrap(rowName2))));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Sets an operation attribute on TGet/TPut/TScan/TIncrement/TDelete and verifies the Thrift
 * conversion utilities carry it through to the corresponding client objects.
 * Note: JUnit assertArrayEquals takes (expected, actual) — arguments ordered accordingly.
 */
@Test public void testAttribute() throws Exception {
  byte[] rowName = "testAttribute".getBytes();
  byte[] attributeKey = "attribute1".getBytes();
  byte[] attributeValue = "value1".getBytes();
  Map<ByteBuffer, ByteBuffer> attributes = new HashMap<>();
  attributes.put(wrap(attributeKey), wrap(attributeValue));

  TGet tGet = new TGet(wrap(rowName));
  tGet.setAttributes(attributes);
  Get get = getFromThrift(tGet);
  assertArrayEquals(attributeValue, get.getAttribute("attribute1"));

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
  TPut tPut = new TPut(wrap(rowName), columnValues);
  tPut.setAttributes(attributes);
  Put put = putFromThrift(tPut);
  assertArrayEquals(attributeValue, put.getAttribute("attribute1"));

  TScan tScan = new TScan();
  tScan.setAttributes(attributes);
  Scan scan = scanFromThrift(tScan);
  assertArrayEquals(attributeValue, scan.getAttribute("attribute1"));

  List<TColumnIncrement> incrementColumns = new ArrayList<>();
  incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
  TIncrement tIncrement = new TIncrement(wrap(rowName), incrementColumns);
  tIncrement.setAttributes(attributes);
  Increment increment = incrementFromThrift(tIncrement);
  assertArrayEquals(attributeValue, increment.getAttribute("attribute1"));

  TDelete tDelete = new TDelete(wrap(rowName));
  tDelete.setAttributes(attributes);
  Delete delete = deleteFromThrift(tDelete);
  assertArrayEquals(attributeValue, delete.getAttribute("attribute1"));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Writes a row with two columns, deletes one column, and verifies only the other remains.
 */
@Test public void testDelete() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testDelete".getBytes();
  ByteBuffer table = wrap(tableAname);

  List<TColumnValue> columnValues = new ArrayList<>();
  TColumnValue columnValueA =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  TColumnValue columnValueB =
      new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
  columnValues.add(columnValueA);
  columnValues.add(columnValueB);
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  handler.put(table, put);

  // Delete only family A / qualifier A.
  TDelete delete = new TDelete(wrap(rowName));
  List<TColumn> deleteColumns = new ArrayList<>();
  TColumn deleteColumn = new TColumn(wrap(familyAname));
  deleteColumn.setQualifier(qualifierAname);
  deleteColumns.add(deleteColumn);
  delete.setColumns(deleteColumns);
  handler.deleteSingle(table, delete);

  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  List<TColumnValue> returnedColumnValues = result.getColumnValues();
  List<TColumnValue> expectedColumnValues = new ArrayList<>();
  expectedColumnValues.add(columnValueB);
  assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * check that checkAndPut fails if the cell does not exist, then put in the cell, then check
 * that the checkAndPut succeeds.
 * @throws Exception
 */
@Test public void testCheckAndPut() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testCheckAndPut".getBytes();
  ByteBuffer table = wrap(tableAname);

  // Guard cell (family A / qualifier A / value A).
  List<TColumnValue> columnValuesA = new ArrayList<>();
  TColumnValue columnValueA =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  columnValuesA.add(columnValueA);
  TPut putA = new TPut(wrap(rowName), columnValuesA);
  putA.setColumnValues(columnValuesA);

  // Guarded put (family B / qualifier B / value B).
  List<TColumnValue> columnValuesB = new ArrayList<>();
  TColumnValue columnValueB =
      new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
  columnValuesB.add(columnValueB);
  TPut putB = new TPut(wrap(rowName), columnValuesB);
  putB.setColumnValues(columnValuesB);

  // Guard cell absent -> checkAndPut must fail and leave the row empty.
  assertFalse(handler.checkAndPut(table, wrap(rowName), wrap(familyAname),
      wrap(qualifierAname), wrap(valueAname), putB));
  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertEquals(0, result.getColumnValuesSize());

  // After writing the guard cell, checkAndPut must succeed.
  handler.put(table, putA);
  assertTrue(handler.checkAndPut(table, wrap(rowName), wrap(familyAname),
      wrap(qualifierAname), wrap(valueAname), putB));
  result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  List<TColumnValue> returnedColumnValues = result.getColumnValues();
  List<TColumnValue> expectedColumnValues = new ArrayList<>();
  expectedColumnValues.add(columnValueA);
  expectedColumnValues.add(columnValueB);
  assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trip sanity check: put a row with two column values and read it back unchanged.
 */
@Test public void testPutGet() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testPutGet".getBytes();
  ByteBuffer table = wrap(tableAname);

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
  columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  handler.put(table, put);

  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  List<TColumnValue> returnedColumnValues = result.getColumnValues();
  assertTColumnValuesEqual(columnValues, returnedColumnValues);
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Puts a cell with a 2-second TTL (via the "_ttl" operation attribute), verifies it is
 * readable immediately, waits well past the TTL, and verifies the cell has expired.
 */
@Test public void testPutTTL() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testPutTTL".getBytes();
  ByteBuffer table = wrap(tableAname);

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);

  // The "_ttl" attribute sets a per-mutation time-to-live in milliseconds.
  Map<ByteBuffer, ByteBuffer> attributes = new HashMap<>();
  long ttlTimeMs = 2000L;
  attributes.put(wrap(Bytes.toBytes("_ttl")), wrap(Bytes.toBytes(ttlTimeMs)));
  put.setAttributes(attributes);
  handler.put(table, put);

  // The cell must be visible before the TTL elapses.
  TGet getOne = new TGet(wrap(rowName));
  TResult resultOne = handler.get(table, getOne);
  assertArrayEquals(rowName, resultOne.getRow());
  assertEquals(1, resultOne.getColumnValuesSize());

  // Sleep far beyond the TTL (15x) to make absolutely sure the cell has expired.
  Thread.sleep(ttlTimeMs * 15);

  TGet getTwo = new TGet(wrap(rowName));
  TResult resultTwo = handler.get(table, getTwo);
  assertNull(resultTwo.getRow());
  assertEquals(0, resultTwo.getColumnValuesSize());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * check that checkAndDelete fails if the cell does not exist, then put in the cell, then
 * check that the checkAndDelete succeeds.
 * @throws Exception
 */
@Test public void testCheckAndDelete() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testCheckAndDelete".getBytes();
  ByteBuffer table = wrap(tableAname);

  // Guard cell (family A) — not written yet.
  List<TColumnValue> columnValuesA = new ArrayList<>();
  TColumnValue columnValueA =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  columnValuesA.add(columnValueA);
  TPut putA = new TPut(wrap(rowName), columnValuesA);
  putA.setColumnValues(columnValuesA);

  // Unrelated cell (family B) that seeds the row.
  List<TColumnValue> columnValuesB = new ArrayList<>();
  TColumnValue columnValueB =
      new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
  columnValuesB.add(columnValueB);
  TPut putB = new TPut(wrap(rowName), columnValuesB);
  putB.setColumnValues(columnValuesB);
  handler.put(table, putB);

  // Guard cell absent -> checkAndDelete must fail and leave the row untouched.
  TDelete delete = new TDelete(wrap(rowName));
  assertFalse(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
      wrap(qualifierAname), wrap(valueAname), delete));
  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  assertTColumnValuesEqual(columnValuesB, result.getColumnValues());

  // With the guard cell present, checkAndDelete must succeed and wipe the row.
  handler.put(table, putA);
  assertTrue(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
      wrap(qualifierAname), wrap(valueAname), delete));
  result = handler.get(table, get);
  assertFalse(result.isSetRow());
  assertEquals(0, result.getColumnValuesSize());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Writes two versions of a cell, then issues a DELETE_COLUMN (single latest version) delete
 * and verifies only the older version survives.
 */
@Test public void testDeleteSingleTimestamp() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testDeleteSingleTimestamp".getBytes();
  ByteBuffer table = wrap(tableAname);

  long timestamp1 = System.currentTimeMillis() - 10;
  long timestamp2 = System.currentTimeMillis();

  List<TColumnValue> columnValues = new ArrayList<>();
  TColumnValue columnValueA =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  columnValueA.setTimestamp(timestamp1);
  columnValues.add(columnValueA);
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  handler.put(table, put);
  // Write a second, newer version of the same cell.
  columnValueA.setTimestamp(timestamp2);
  handler.put(table, put);

  TGet get = new TGet(wrap(rowName));
  get.setMaxVersions(2);
  TResult result = handler.get(table, get);
  assertEquals(2, result.getColumnValuesSize());

  TDelete delete = new TDelete(wrap(rowName));
  List<TColumn> deleteColumns = new ArrayList<>();
  TColumn deleteColumn = new TColumn(wrap(familyAname));
  deleteColumn.setQualifier(qualifierAname);
  deleteColumns.add(deleteColumn);
  delete.setColumns(deleteColumns);
  // DELETE_COLUMN (singular) removes only the latest version.
  delete.setDeleteType(TDeleteType.DELETE_COLUMN);
  handler.deleteSingle(table, delete);

  get = new TGet(wrap(rowName));
  result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  assertEquals(1, result.getColumnValuesSize());
  // The surviving version must be the older one.
  assertEquals(timestamp1, result.getColumnValues().get(0).getTimestamp());
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Writes 10 rows, scans them back in order via an open scanner, verifies the scanner is
 * exhausted afterwards, and verifies a closed scanner id is rejected.
 */
@Test public void testScan() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);

  TColumnValue columnValue =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(columnValue);
  for (int i = 0; i < 10; i++) {
    TPut put = new TPut(wrap(("testScan" + i).getBytes()), columnValues);
    handler.put(table, put);
  }

  TScan scan = new TScan();
  List<TColumn> columns = new ArrayList<>();
  TColumn column = new TColumn();
  column.setFamily(familyAname);
  column.setQualifier(qualifierAname);
  columns.add(column);
  scan.setColumns(columns);
  scan.setStartRow("testScan".getBytes());
  scan.setStopRow("testScan\uffff".getBytes());

  int scanId = handler.openScanner(table, scan);
  List<TResult> results = handler.getScannerRows(scanId, 10);
  assertEquals(10, results.size());
  for (int i = 0; i < 10; i++) {
    assertArrayEquals(("testScan" + i).getBytes(), results.get(i).getRow());
  }

  // The scanner should now be exhausted.
  results = handler.getScannerRows(scanId, 10);
  assertEquals(0, results.size());
  handler.closeScanner(scanId);

  try {
    handler.getScannerRows(scanId, 10);
    fail("Scanner id should be invalid");
  } catch (TIllegalArgument e) {
    // expected: scanner id is no longer valid after close
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Writes two rows via putMultiple, reads both via getMultiple, and verifies each row comes
 * back with the expected key and column values in request order.
 */
@Test public void testPutGetMultiple() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);
  byte[] rowName1 = "testPutGetMultiple1".getBytes();
  byte[] rowName2 = "testPutGetMultiple2".getBytes();

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
  columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
  List<TPut> puts = new ArrayList<>();
  puts.add(new TPut(wrap(rowName1), columnValues));
  puts.add(new TPut(wrap(rowName2), columnValues));
  handler.putMultiple(table, puts);

  List<TGet> gets = new ArrayList<>();
  gets.add(new TGet(wrap(rowName1)));
  gets.add(new TGet(wrap(rowName2)));
  List<TResult> results = handler.getMultiple(table, gets);
  assertEquals(2, results.size());
  assertArrayEquals(rowName1, results.get(0).getRow());
  assertTColumnValuesEqual(columnValues, results.get(0).getColumnValues());
  assertArrayEquals(rowName2, results.get(1).getRow());
  assertTColumnValuesEqual(columnValues, results.get(1).getColumnValues());
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Writes 10 rows and scans them with a reversed scanner, verifying the rows come back in
 * descending key order, the scanner drains, and a closed scanner id is rejected.
 */
@Test public void testReverseScan() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);

  TColumnValue columnValue =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(columnValue);
  for (int i = 0; i < 10; i++) {
    TPut put = new TPut(wrap(("testReverseScan" + i).getBytes()), columnValues);
    handler.put(table, put);
  }

  // For a reversed scan the start row is the HIGHER key and the stop row the lower.
  TScan scan = new TScan();
  scan.setReversed(true);
  List<TColumn> columns = new ArrayList<>();
  TColumn column = new TColumn();
  column.setFamily(familyAname);
  column.setQualifier(qualifierAname);
  columns.add(column);
  scan.setColumns(columns);
  scan.setStartRow("testReverseScan\uffff".getBytes());
  scan.setStopRow("testReverseScan".getBytes());

  int scanId = handler.openScanner(table, scan);
  List<TResult> results = handler.getScannerRows(scanId, 10);
  assertEquals(10, results.size());
  for (int i = 0; i < 10; i++) {
    assertArrayEquals(("testReverseScan" + (9 - i)).getBytes(), results.get(i).getRow());
  }

  results = handler.getScannerRows(scanId, 10);
  assertEquals(0, results.size());
  handler.closeScanner(scanId);

  try {
    handler.getScannerRows(scanId, 10);
    fail("Scanner id should be invalid");
  } catch (TIllegalArgument e) {
    // expected: scanner id is no longer valid after close
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Puts the byte string "42" into a cell, appends "23", and verifies the cell now holds the
 * concatenation "4223".
 */
@Test public void testAppend() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testAppend".getBytes();
  ByteBuffer table = wrap(tableAname);
  byte[] v1 = Bytes.toBytes("42");
  byte[] v2 = Bytes.toBytes("23");

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v1)));
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  handler.put(table, put);

  List<TColumnValue> appendColumns = new ArrayList<>();
  appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v2)));
  TAppend append = new TAppend(wrap(rowName), appendColumns);
  handler.append(table, append);

  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  assertEquals(1, result.getColumnValuesSize());
  TColumnValue columnValue = result.getColumnValues().get(0);
  // Append concatenates the new bytes onto the existing value.
  assertArrayEquals(Bytes.add(v1, v2), columnValue.getValue());
}

Class: org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandlerWithLabels

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Puts a cell with PRIVATE visibility, appends bytes with SECRET visibility, and reads back
 * with SECRET authorization, expecting the concatenated value.
 */
@Test public void testAppend() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testAppend".getBytes();
  ByteBuffer table = wrap(tableAname);
  byte[] v1 = Bytes.toBytes(1L);
  byte[] v2 = Bytes.toBytes(5L);

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
  handler.put(table, put);

  List<TColumnValue> appendColumns = new ArrayList<>();
  appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v2)));
  TAppend append = new TAppend(wrap(rowName), appendColumns);
  append.setCellVisibility(new TCellVisibility().setExpression(SECRET));
  handler.append(table, append);

  // Read with SECRET authorization; the appended cell carries that label.
  TGet get = new TGet(wrap(rowName));
  TAuthorization tauth = new TAuthorization();
  List<String> labels = new ArrayList<>();
  labels.add(SECRET);
  tauth.setLabels(labels);
  get.setAuthorizations(tauth);

  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  assertEquals(1, result.getColumnValuesSize());
  TColumnValue columnValue = result.getColumnValues().get(0);
  assertArrayEquals(Bytes.add(v1, v2), columnValue.getValue());
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Writes 20 rows where row 03 gets PUBLIC visibility and the rest get
 * (SECRET|CONFIDENTIAL)&amp;!TOPSECRET, scans rows 00..04 with SECRET+PRIVATE authorizations,
 * and verifies row 03 is filtered out so rows 00, 01, 02 and 04 are returned.
 *
 * Bug fix: the original loop `continue`d at i == 3, so the 4th returned result (row 04) was
 * never asserted and the else branch was unreachable; every result is now verified.
 */
@Test public void testGetScannerResultsWithAuthorizations() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);

  TColumnValue columnValue =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(columnValue);
  for (int i = 0; i < 20; i++) {
    TPut put =
        new TPut(wrap(("testGetScannerResults" + pad(i, (byte) 2)).getBytes()), columnValues);
    if (i == 3) {
      // Row 03 is only visible to PUBLIC — our scan auths won't match it.
      put.setCellVisibility(new TCellVisibility().setExpression(PUBLIC));
    } else {
      put.setCellVisibility(new TCellVisibility().setExpression(
          "(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET));
    }
    handler.put(table, put);
  }

  TScan scan = new TScan();
  List<TColumn> columns = new ArrayList<>();
  TColumn column = new TColumn();
  column.setFamily(familyAname);
  column.setQualifier(qualifierAname);
  columns.add(column);
  scan.setColumns(columns);
  scan.setStartRow("testGetScannerResults".getBytes());
  scan.setStopRow("testGetScannerResults05".getBytes());

  TAuthorization tauth = new TAuthorization();
  List<String> labels = new ArrayList<>();
  labels.add(SECRET);
  labels.add(PRIVATE);
  tauth.setLabels(labels);
  scan.setAuthorizations(tauth);

  // Rows 00..04 are in range, but row 03 is invisible -> 4 results.
  List<TResult> results = handler.getScannerResults(table, scan, 5);
  assertEquals(4, results.size());
  for (int i = 0; i < 4; i++) {
    if (i < 3) {
      assertArrayEquals(("testGetScannerResults" + pad(i, (byte) 2)).getBytes(),
          results.get(i).getRow());
    } else {
      // Row 03 was skipped, so the 4th result is row 04.
      assertArrayEquals(("testGetScannerResults" + pad(i + 1, (byte) 2)).getBytes(),
          results.get(i).getRow());
    }
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * Puts a cell with PRIVATE visibility and increments it with SECRET visibility, then reads
 * with only the PUBLIC label — no cell version matches, so the result must be empty.
 */
@Test public void testIncrementWithTagsWithNotMatchLabels() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testIncrementWithTagsWithNotMatchLabels".getBytes();
  ByteBuffer table = wrap(tableAname);

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
  handler.put(table, put);

  List<TColumnIncrement> incrementColumns = new ArrayList<>();
  incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
  TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
  increment.setCellVisibility(new TCellVisibility().setExpression(SECRET));
  handler.increment(table, increment);

  // PUBLIC matches neither PRIVATE nor SECRET, so nothing is visible.
  TGet get = new TGet(wrap(rowName));
  TAuthorization tauth = new TAuthorization();
  List<String> labels = new ArrayList<>();
  labels.add(PUBLIC);
  tauth.setLabels(labels);
  get.setAuthorizations(tauth);
  TResult result = handler.get(table, get);
  assertNull(result.getRow());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Puts 1L with PRIVATE visibility, increments with SECRET visibility, and reads back with the
 * SECRET label, expecting the incremented value 2L.
 */
@Test public void testIncrementWithTags() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testIncrementWithTags".getBytes();
  ByteBuffer table = wrap(tableAname);

  List<TColumnValue> columnValues = new ArrayList<>();
  columnValues.add(
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
  handler.put(table, put);

  List<TColumnIncrement> incrementColumns = new ArrayList<>();
  incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
  TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
  increment.setCellVisibility(new TCellVisibility().setExpression(SECRET));
  handler.increment(table, increment);

  TGet get = new TGet(wrap(rowName));
  TAuthorization tauth = new TAuthorization();
  List<String> labels = new ArrayList<>();
  labels.add(SECRET);
  tauth.setLabels(labels);
  get.setAuthorizations(tauth);

  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  assertEquals(1, result.getColumnValuesSize());
  TColumnValue columnValue = result.getColumnValues().get(0);
  // 1L + default increment amount of 1 = 2L.
  assertArrayEquals(Bytes.toBytes(2L), columnValue.getValue());
}

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Writes 10 rows "testScan0".."testScan9"; row 5 gets PUBLIC visibility, the rest get
// (SECRET|CONFIDENTIAL)&!TOPSECRET. Scanning with SECRET+PRIVATE authorizations must
// return the 9 non-PUBLIC rows (row "testScan5" filtered out), a follow-up fetch must be
// empty, and fetching from the closed scanner must raise TIllegalArgument.
@Test public void testScanWithVisibilityLabels() throws Exception { ThriftHBaseServiceHandler handler=createHandler(); ByteBuffer table=wrap(tableAname); TColumnValue columnValue=new TColumnValue(wrap(familyAname),wrap(qualifierAname),wrap(valueAname)); List columnValues=new ArrayList(); columnValues.add(columnValue); for (int i=0; i < 10; i++) { TPut put=new TPut(wrap(("testScan" + i).getBytes()),columnValues); if (i == 5) { put.setCellVisibility(new TCellVisibility().setExpression(PUBLIC)); } else { put.setCellVisibility(new TCellVisibility().setExpression("(" + SECRET + "|"+ CONFIDENTIAL+ ")"+ "&"+ "!"+ TOPSECRET)); } handler.put(table,put); } TScan scan=new TScan(); List columns=new ArrayList(); TColumn column=new TColumn(); column.setFamily(familyAname); column.setQualifier(qualifierAname); columns.add(column); scan.setColumns(columns); scan.setStartRow("testScan".getBytes()); scan.setStopRow("testScan\uffff".getBytes()); TAuthorization tauth=new TAuthorization(); List labels=new ArrayList(); labels.add(SECRET); labels.add(PRIVATE); tauth.setLabels(labels); scan.setAuthorizations(tauth); int scanId=handler.openScanner(table,scan); List results=handler.getScannerRows(scanId,10); assertEquals(9,results.size()); Assert.assertFalse(Bytes.equals(results.get(5).getRow(),("testScan" + 5).getBytes())); for (int i=0; i < 9; i++) { if (i < 5) { assertArrayEquals(("testScan" + i).getBytes(),results.get(i).getRow()); } else if (i == 5) { continue; } else { assertArrayEquals(("testScan" + (i + 1)).getBytes(),results.get(i).getRow()); } } results=handler.getScannerRows(scanId,9); assertEquals(0,results.size()); handler.closeScanner(scanId); try { handler.getScannerRows(scanId,9); fail("Scanner id should be invalid"); } catch ( TIllegalArgument e) { } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Puts two column values under visibility (SECRET|CONFIDENTIAL)&!TOPSECRET, then gets the
// row with SECRET+PRIVATE authorizations: SECRET satisfies the expression, so both column
// values must come back intact.
@Test public void testGetsWithLabels() throws Exception { ThriftHBaseServiceHandler handler=createHandler(); byte[] rowName="testPutGet".getBytes(); ByteBuffer table=wrap(tableAname); List columnValues=new ArrayList(); columnValues.add(new TColumnValue(wrap(familyAname),wrap(qualifierAname),wrap(valueAname))); columnValues.add(new TColumnValue(wrap(familyBname),wrap(qualifierBname),wrap(valueBname))); TPut put=new TPut(wrap(rowName),columnValues); put.setColumnValues(columnValues); put.setCellVisibility(new TCellVisibility().setExpression("(" + SECRET + "|"+ CONFIDENTIAL+ ")"+ "&"+ "!"+ TOPSECRET)); handler.put(table,put); TGet get=new TGet(wrap(rowName)); TAuthorization tauth=new TAuthorization(); List labels=new ArrayList(); labels.add(SECRET); labels.add(PRIVATE); tauth.setLabels(labels); get.setAuthorizations(tauth); TResult result=handler.get(table,get); assertArrayEquals(rowName,result.getRow()); List returnedColumnValues=result.getColumnValues(); assertTColumnValuesEqual(columnValues,returnedColumnValues); }

Class: org.apache.hadoop.hbase.trace.TestHTraceHooks

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Creates a table inside a "creating table" trace span and verifies the receiver captured
// a single root span with at least one MasterService.CreateTable child and >3 children
// overall; then performs a put inside a "doing put" span and verifies a second root span
// with that description appears.
@Test public void testTraceCreateTable() throws Exception { TraceScope tableCreationSpan=Trace.startSpan("creating table",Sampler.ALWAYS); Table table; try { table=TEST_UTIL.createTable(TableName.valueOf("table"),FAMILY_BYTES); } finally { tableCreationSpan.close(); } TEST_UTIL.waitFor(1000,new Waiter.Predicate(){ @Override public boolean evaluate() throws Exception { return rcvr.getSpans().size() >= 5; } } ); Collection spans=rcvr.getSpans(); TraceTree traceTree=new TraceTree(spans); Collection roots=traceTree.getSpansByParent().find(ROOT_SPAN_ID); assertEquals(1,roots.size()); Span createTableRoot=roots.iterator().next(); assertEquals("creating table",createTableRoot.getDescription()); int createTableCount=0; for ( Span s : traceTree.getSpansByParent().find(createTableRoot.getSpanId())) { if (s.getDescription().startsWith("MasterService.CreateTable")) { createTableCount++; } } assertTrue(createTableCount >= 1); assertTrue(traceTree.getSpansByParent().find(createTableRoot.getSpanId()).size() > 3); assertTrue(spans.size() > 5); Put put=new Put("row".getBytes()); put.addColumn(FAMILY_BYTES,"col".getBytes(),"value".getBytes()); TraceScope putSpan=Trace.startSpan("doing put",Sampler.ALWAYS); try { table.put(put); } finally { putSpan.close(); } spans=rcvr.getSpans(); traceTree=new TraceTree(spans); roots=traceTree.getSpansByParent().find(ROOT_SPAN_ID); assertEquals(2,roots.size()); Span putRoot=null; for ( Span root : roots) { if (root.getDescription().equals("doing put")) { putRoot=root; } } assertNotNull(putRoot); }

Class: org.apache.hadoop.hbase.types.TestFixedLengthWrapper

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips every test value through a FixedLengthWrapper over RawBytes, in both sort
 * orders and for each configured limit: encode must consume exactly {@code limit} bytes,
 * decode must reproduce the original value prefix, and skip must advance by {@code limit}.
 */
@Test
public void testReadWrite() {
  for (int limit : limits) {
    PositionedByteRange range = new SimplePositionedMutableByteRange(limit);
    for (Order order : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
      for (byte[] value : VALUES) {
        range.setPosition(0);
        DataType wrapper = new FixedLengthWrapper(new RawBytes(order), limit);
        assertEquals(limit, wrapper.encode(range, value));
        range.setPosition(0);
        byte[] decoded = wrapper.decode(range);
        assertTrue("Decoding output differs from expected",
            Bytes.equals(value, 0, value.length, decoded, 0, value.length));
        range.setPosition(0);
        assertEquals(limit, wrapper.skip(range));
      }
    }
  }
}

Class: org.apache.hadoop.hbase.types.TestOrderedBlob

InternalCallVerifier EqualityVerifier 
/**
 * Verifies that OrderedBlob.encodedLength predicts exactly the number of bytes an actual
 * encode writes, for every test value in both sort orders.
 */
@Test
public void testEncodedLength() {
  PositionedByteRange range = new SimplePositionedMutableByteRange(20);
  for (DataType codec : new OrderedBlob[] { OrderedBlob.ASCENDING, OrderedBlob.DESCENDING }) {
    for (byte[] value : VALUES) {
      range.setPosition(0);
      codec.encode(range, value);
      assertEquals("encodedLength does not match actual, " + Bytes.toStringBinary(value),
          range.getPosition(), codec.encodedLength(value));
    }
  }
}

Class: org.apache.hadoop.hbase.types.TestOrderedBlobVar

InternalCallVerifier EqualityVerifier 
/**
 * Verifies that OrderedBlobVar.encodedLength predicts exactly the number of bytes an
 * actual encode writes, for every test value in both sort orders.
 */
@Test
public void testEncodedLength() {
  PositionedByteRange range = new SimplePositionedMutableByteRange(20);
  for (DataType codec : new OrderedBlobVar[] { OrderedBlobVar.ASCENDING,
      OrderedBlobVar.DESCENDING }) {
    for (byte[] value : VALUES) {
      range.setPosition(0);
      codec.encode(range, value);
      assertEquals("encodedLength does not match actual, " + Bytes.toStringBinary(value),
          range.getPosition(), codec.encodedLength(value));
    }
  }
}

Class: org.apache.hadoop.hbase.types.TestOrderedString

InternalCallVerifier EqualityVerifier 
/**
 * Verifies that OrderedString.encodedLength predicts exactly the number of bytes an
 * actual encode writes, for every test value in both sort orders.
 */
@Test
public void testEncodedLength() {
  PositionedByteRange range = new SimplePositionedMutableByteRange(20);
  for (DataType codec : new OrderedString[] { OrderedString.ASCENDING,
      OrderedString.DESCENDING }) {
    for (String value : VALUES) {
      range.setPosition(0);
      codec.encode(range, value);
      assertEquals("encodedLength does not match actual, " + value,
          range.getPosition(), codec.encodedLength(value));
    }
  }
}

Class: org.apache.hadoop.hbase.types.TestPBCell

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Converts a KeyValue to a protobuf Cell, encodes it through CODEC into a byte range sized
// to the message, decodes it back, and checks (a) decode consumed exactly the encoded
// length and (b) the round-tripped cell equals the original.
/** * Basic test to verify utility methods in {@link PBType} and delegation to protobuf works. */ @Test public void testRoundTrip(){ final Cell cell=new KeyValue(Bytes.toBytes("row"),Bytes.toBytes("fam"),Bytes.toBytes("qual"),Bytes.toBytes("val")); CellProtos.Cell c=ProtobufUtil.toCell(cell), decoded; PositionedByteRange pbr=new SimplePositionedByteRange(c.getSerializedSize()); pbr.setPosition(0); int encodedLength=CODEC.encode(pbr,c); pbr.setPosition(0); decoded=CODEC.decode(pbr); assertEquals(encodedLength,pbr.getPosition()); assertTrue(CellUtil.equals(cell,ProtobufUtil.toCell(decoded))); }

Class: org.apache.hadoop.hbase.types.TestRawString

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips every test string through RawString in both sort orders: encode must fill
 * the exactly-sized buffer, the raw bytes must match the order-applied UTF-8 bytes of the
 * value, and decode/skip must each consume the full buffer.
 */
@Test
public void testReadWrite() {
  for (Order order : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    RawString codec = Order.ASCENDING == order ? RawString.ASCENDING : RawString.DESCENDING;
    for (String value : VALUES) {
      PositionedByteRange range =
          new SimplePositionedMutableByteRange(Bytes.toBytes(value).length);
      assertEquals(range.getLength(), codec.encode(range, value));
      byte[] expected = Bytes.toBytes(value);
      order.apply(expected);
      assertArrayEquals(expected, range.getBytes());
      range.setPosition(0);
      assertEquals(value, codec.decode(range));
      range.setPosition(0);
      assertEquals(range.getLength(), codec.skip(range));
      assertEquals(range.getLength(), range.getPosition());
    }
  }
}

Class: org.apache.hadoop.hbase.types.TestStruct

IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
// Encodes each constructor-arg tuple with both the generic Struct codec and the
// specialized codec, asserts the encodings are byte-identical, then sorts the values and
// both encoded arrays independently and asserts decode order matches value order — i.e.
// both encoders preserve sort order.
@Test @SuppressWarnings("unchecked") public void testOrderPreservation() throws Exception { Object[] vals=new Object[constructorArgs.length]; PositionedByteRange[] encodedGeneric=new PositionedByteRange[constructorArgs.length]; PositionedByteRange[] encodedSpecialized=new PositionedByteRange[constructorArgs.length]; Constructor ctor=specialized.encodedClass().getConstructor(Object[].class); for (int i=0; i < vals.length; i++) { vals[i]=ctor.newInstance(new Object[]{constructorArgs[i]}); encodedGeneric[i]=new SimplePositionedMutableByteRange(generic.encodedLength(constructorArgs[i])); encodedSpecialized[i]=new SimplePositionedMutableByteRange(specialized.encodedLength(vals[i])); } for (int i=0; i < vals.length; i++) { generic.encode(encodedGeneric[i],constructorArgs[i]); encodedGeneric[i].setPosition(0); specialized.encode(encodedSpecialized[i],vals[i]); encodedSpecialized[i].setPosition(0); assertArrayEquals(encodedGeneric[i].getBytes(),encodedSpecialized[i].getBytes()); } Arrays.sort(vals); Arrays.sort(encodedGeneric); Arrays.sort(encodedSpecialized); for (int i=0; i < vals.length; i++) { assertEquals("Struct encoder does not preserve sort order at position " + i,vals[i],ctor.newInstance(new Object[]{generic.decode(encodedGeneric[i])})); assertEquals("Specialized encoder does not preserve sort order at position " + i,vals[i],specialized.decode(encodedSpecialized[i])); } }

Class: org.apache.hadoop.hbase.types.TestStructNullExtension

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Positive cases for null extension. */ @Test public void testNullableNullExtension(){ StructBuilder builder=new StructBuilder().add(OrderedNumeric.ASCENDING).add(OrderedString.ASCENDING); Struct shorter=builder.toStruct(); Struct longer=builder.add(new TerminatedWrapper(OrderedString.ASCENDING,"/")).add(OrderedNumeric.ASCENDING).toStruct(); PositionedByteRange buf1=new SimplePositionedMutableByteRange(7); Object[] val1=new Object[]{BigDecimal.ONE,"foo"}; assertEquals("Encoding shorter value wrote a surprising number of bytes.",buf1.getLength(),shorter.encode(buf1,val1)); int shortLen=buf1.getLength(); buf1.setPosition(0); StructIterator it=longer.iterator(buf1); it.skip(); it.skip(); assertEquals("Position should be at end. Broken test.",buf1.getLength(),buf1.getPosition()); assertEquals("Failed to skip null element with extended struct.",0,it.skip()); assertEquals("Failed to skip null element with extended struct.",0,it.skip()); buf1.setPosition(0); it=longer.iterator(buf1); assertEquals(BigDecimal.ONE,it.next()); assertEquals("foo",it.next()); assertEquals("Position should be at end. 
Broken test.",buf1.getLength(),buf1.getPosition()); assertNull("Failed to skip null element with extended struct.",it.next()); assertNull("Failed to skip null element with extended struct.",it.next()); buf1.setPosition(0); assertArrayEquals("Simple struct decoding is broken.",val1,shorter.decode(buf1)); buf1.setPosition(0); assertArrayEquals("Decoding short value with extended struct should append null elements.",Arrays.copyOf(val1,4),longer.decode(buf1)); PositionedByteRange buf2=new SimplePositionedMutableByteRange(7); buf1.setPosition(0); assertEquals("Encoding a short value with extended struct should have same result as using short struct.",shortLen,longer.encode(buf2,val1)); assertArrayEquals("Encoding a short value with extended struct should have same result as using short struct",buf1.getBytes(),buf2.getBytes()); val1=new Object[]{null,null,null,null}; buf1.set(0); buf2.set(0); assertEquals("Encoding null-truncated value wrote a surprising number of bytes.",buf1.getLength(),longer.encode(buf1,new Object[0])); assertEquals("Encoding null-extended value wrote a surprising number of bytes.",buf1.getLength(),longer.encode(buf1,val1)); assertArrayEquals("Encoded unexpected result.",buf1.getBytes(),buf2.getBytes()); assertArrayEquals("Decoded unexpected result.",val1,longer.decode(buf2)); Object[] val2=new Object[]{BigDecimal.ONE,null,null,null}; buf1.set(2); buf2.set(2); assertEquals("Encoding null-truncated value wrote a surprising number of bytes.",buf1.getLength(),longer.encode(buf1,Arrays.copyOf(val2,1))); assertEquals("Encoding null-extended value wrote a surprising number of bytes.",buf2.getLength(),longer.encode(buf2,val2)); assertArrayEquals("Encoded unexpected result.",buf1.getBytes(),buf2.getBytes()); buf2.setPosition(0); assertArrayEquals("Decoded unexpected result.",val2,longer.decode(buf2)); Object[] val3=new Object[]{BigDecimal.ONE,null,"foo",null}; buf1.set(9); buf2.set(9); assertEquals("Encoding null-truncated value wrote a surprising number of 
bytes.",buf1.getLength(),longer.encode(buf1,Arrays.copyOf(val3,3))); assertEquals("Encoding null-extended value wrote a surprising number of bytes.",buf2.getLength(),longer.encode(buf2,val3)); assertArrayEquals("Encoded unexpected result.",buf1.getBytes(),buf2.getBytes()); buf2.setPosition(0); assertArrayEquals("Decoded unexpected result.",val3,longer.decode(buf2)); }

Class: org.apache.hadoop.hbase.types.TestTerminatedWrapper

InternalCallVerifier EqualityVerifier 
/**
 * For a TerminatedWrapper over non-skippable RawBytes, skip must consume exactly as many
 * bytes as encode wrote, for every value/terminator/order combination.
 */
@Test
public void testSkipNonSkippable() {
  PositionedByteRange range = new SimplePositionedMutableByteRange(12);
  for (Order order : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (byte[] terminator : TERMINATORS) {
      for (byte[] value : VALUES_BYTES) {
        range.setPosition(0);
        DataType wrapper = new TerminatedWrapper(new RawBytes(order), terminator);
        int written = wrapper.encode(range, value);
        range.setPosition(0);
        assertEquals(written, wrapper.skip(range));
        assertEquals(written, range.getPosition());
      }
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * For a TerminatedWrapper over skippable OrderedString, encode and skip must both consume
 * value length + 2 (OrderedString's own encoding overhead) + terminator length bytes.
 */
@Test
public void testSkipSkippable() {
  PositionedByteRange range = new SimplePositionedMutableByteRange(14);
  for (OrderedString inner : new OrderedString[] { OrderedString.ASCENDING,
      OrderedString.DESCENDING }) {
    for (byte[] terminator : TERMINATORS) {
      for (String value : VALUES_STRINGS) {
        range.setPosition(0);
        DataType wrapper = new TerminatedWrapper(inner, terminator);
        int expectedLength = value.length() + 2 + terminator.length;
        assertEquals(expectedLength, wrapper.encode(range, value));
        range.setPosition(0);
        assertEquals(expectedLength, wrapper.skip(range));
        assertEquals(expectedLength, range.getPosition());
      }
    }
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Round-trips raw byte values through a TerminatedWrapper over RawBytes: encode writes
 * value + terminator, decode returns the original value and leaves the position past the
 * terminator.
 */
@Test
public void testReadWriteNonSkippable() {
  PositionedByteRange range = new SimplePositionedMutableByteRange(12);
  for (Order order : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (byte[] terminator : TERMINATORS) {
      for (byte[] value : VALUES_BYTES) {
        range.setPosition(0);
        DataType wrapper = new TerminatedWrapper(new RawBytes(order), terminator);
        assertEquals(value.length + terminator.length, wrapper.encode(range, value));
        range.setPosition(0);
        assertArrayEquals(value, wrapper.decode(range));
        assertEquals(value.length + terminator.length, range.getPosition());
      }
    }
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Round-trips strings through a TerminatedWrapper over OrderedString: encode writes
 * value length + 2 (OrderedString overhead) + terminator bytes, and decode returns the
 * original string with the position advanced by the same amount.
 */
@Test
public void testReadWriteSkippable() {
  PositionedByteRange range = new SimplePositionedMutableByteRange(14);
  for (OrderedString inner : new OrderedString[] { OrderedString.ASCENDING,
      OrderedString.DESCENDING }) {
    for (byte[] terminator : TERMINATORS) {
      for (String value : VALUES_STRINGS) {
        range.setPosition(0);
        DataType wrapper = new TerminatedWrapper(inner, terminator);
        assertEquals(value.length() + 2 + terminator.length, wrapper.encode(range, value));
        range.setPosition(0);
        assertEquals(value, wrapper.decode(range));
        assertEquals(value.length() + 2 + terminator.length, range.getPosition());
      }
    }
  }
}

Class: org.apache.hadoop.hbase.types.TestUnion2

InternalCallVerifier EqualityVerifier 
/**
 * For both member types of SampleUnion1 (Integer and String), skip must consume exactly
 * the number of bytes that encode reported writing.
 */
@Test
public void testSkip() {
  Integer numberMember = Integer.valueOf(10);
  String stringMember = "hello";
  PositionedByteRange range = new SimplePositionedMutableByteRange(10);
  SampleUnion1 union = new SampleUnion1();
  int written = union.encode(range, numberMember);
  range.setPosition(0);
  assertEquals(written, union.skip(range));
  range.setPosition(0);
  written = union.encode(range, stringMember);
  range.setPosition(0);
  assertEquals(written, union.skip(range));
}

InternalCallVerifier BooleanVerifier 
/**
 * Encodes each member type of SampleUnion1 and decodes it back through the matching
 * typed accessor (decodeA for Integer, decodeB for String), asserting equality via
 * compareTo == 0.
 */
@Test
public void testEncodeDecode() {
  Integer numberMember = Integer.valueOf(10);
  String stringMember = "hello";
  PositionedByteRange range = new SimplePositionedMutableByteRange(10);
  SampleUnion1 union = new SampleUnion1();
  union.encode(range, numberMember);
  range.setPosition(0);
  assertTrue(0 == numberMember.compareTo(union.decodeA(range)));
  range.setPosition(0);
  union.encode(range, stringMember);
  range.setPosition(0);
  assertTrue(0 == stringMember.compareTo(union.decodeB(range)));
}

Class: org.apache.hadoop.hbase.util.TestBoundedArrayQueue

IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Exercises the full bounded-queue contract on the shared 'queue' fixture: empty queue
// returns null from poll/peek; fills to qMaxElements then offer fails; peek is
// non-destructive; elements poll back in FIFO order; and the queue is reusable after
// being drained.
@Test public void testBoundedArrayQueueOperations() throws Exception { assertEquals(0,queue.size()); assertNull(queue.poll()); assertNull(queue.peek()); for (int i=0; i < qMaxElements; i++) { assertTrue(queue.offer(i)); } assertEquals(qMaxElements,queue.size()); assertFalse(queue.offer(0)); assertEquals(0,queue.peek().intValue()); assertEquals(0,queue.peek().intValue()); for (int i=0; i < qMaxElements; i++) { assertEquals(i,queue.poll().intValue()); } assertEquals(0,queue.size()); assertNull(queue.poll()); assertTrue(queue.offer(100)); assertTrue(queue.offer(1000)); assertEquals(100,queue.peek().intValue()); assertEquals(100,queue.poll().intValue()); assertEquals(1000,queue.poll().intValue()); }

Class: org.apache.hadoop.hbase.util.TestBoundedPriorityBlockingQueue

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Fills the priority queue by offering odd then even priorities up to CAPACITY, verifies a
// further timed offer is rejected, then polls all elements and checks they come back in
// ascending seqId/priority order with size and remainingCapacity tracking each removal.
@Test public void testInsert() throws Exception { for (int i=1; i <= CAPACITY; i+=2) { assertTrue(queue.offer(new TestObject(i,i))); assertEquals((1 + i) / 2,queue.size()); } for (int i=2; i <= CAPACITY; i+=2) { assertTrue(queue.offer(new TestObject(i,i))); assertEquals(CAPACITY / 2 + (i / 2),queue.size()); } assertFalse(queue.offer(new TestObject(0,-1),5,TimeUnit.MILLISECONDS)); for (int i=1; i <= CAPACITY; ++i) { TestObject obj=queue.poll(); assertEquals(i,obj.getSeqId()); assertEquals(CAPACITY - i,queue.size()); assertEquals(i,queue.remainingCapacity()); } assertEquals(null,queue.poll()); }

InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier HybridVerifier 
// Two tasks coordinate via a CyclicBarrier: the consumer first times out polling the empty
// queue, then after both threads reach the barrier the producer offers testObj and the
// consumer's timed poll must return that same instance, leaving the queue empty.
@Test(timeout=10000) public void testPollInExecutor() throws InterruptedException { final TestObject testObj=new TestObject(0,0); final CyclicBarrier threadsStarted=new CyclicBarrier(2); ExecutorService executor=Executors.newFixedThreadPool(2); executor.execute(new Runnable(){ public void run(){ try { assertNull(queue.poll(1000,TimeUnit.MILLISECONDS)); threadsStarted.await(); assertSame(testObj,queue.poll(1000,TimeUnit.MILLISECONDS)); assertTrue(queue.isEmpty()); } catch ( Exception e) { throw new RuntimeException(e); } } } ); executor.execute(new Runnable(){ public void run(){ try { threadsStarted.await(); queue.offer(testObj); } catch ( Exception e) { throw new RuntimeException(e); } } } ); executor.shutdown(); assertTrue(executor.awaitTermination(8000,TimeUnit.MILLISECONDS)); }

IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Polling an empty queue returns null; after filling the queue, its poll order must match
 * a reference java.util.PriorityQueue driven by the same TestObjectComparator; and once
 * drained the queue returns null again.
 */
@Test
public void testPoll() {
  assertNull(queue.poll());
  PriorityQueue testList = new PriorityQueue(CAPACITY, new TestObjectComparator());
  for (int i = 0; i < CAPACITY; ++i) {
    TestObject obj = new TestObject(i, i);
    testList.add(obj);
    queue.offer(obj);
  }
  for (int i = 0; i < CAPACITY; ++i) {
    assertEquals(testList.poll(), queue.poll());
  }
  // Was assertNull(null, queue.poll()) — the first argument of that overload is the
  // failure message, and passing a literal null there is meaningless.
  assertNull(queue.poll());
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Offers six objects alternating between priorities 10 and 20 with increasing seqIds,
// then verifies all priority-10 objects poll first in insertion (seqId) order, followed by
// all priority-20 objects in insertion order — i.e. FIFO within equal priority.
@Test public void testFifoSamePriority() throws Exception { assertTrue(CAPACITY >= 6); for (int i=0; i < 6; ++i) { assertTrue(queue.offer(new TestObject((1 + (i % 2)) * 10,i))); } for (int i=0; i < 6; i+=2) { TestObject obj=queue.poll(); assertEquals(10,obj.getPriority()); assertEquals(i,obj.getSeqId()); } for (int i=1; i < 6; i+=2) { TestObject obj=queue.poll(); assertEquals(20,obj.getPriority()); assertEquals(i,obj.getSeqId()); } assertEquals(null,queue.poll()); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): method name has a typo ("tesAppend", missing 't'); left unchanged since
// test names are discovered by the runner. Offers CAPACITY objects with increasing
// priority/seqId (each lands at the tail), rejects a further timed offer, then verifies
// poll order, size, and remainingCapacity after each removal.
@Test public void tesAppend() throws Exception { for (int i=1; i <= CAPACITY; ++i) { assertTrue(queue.offer(new TestObject(i,i))); assertEquals(i,queue.size()); assertEquals(CAPACITY - i,queue.remainingCapacity()); } assertFalse(queue.offer(new TestObject(0,-1),5,TimeUnit.MILLISECONDS)); for (int i=1; i <= CAPACITY; ++i) { TestObject obj=queue.poll(); assertEquals(i,obj.getSeqId()); assertEquals(CAPACITY - i,queue.size()); assertEquals(i,queue.remainingCapacity()); } assertEquals(null,queue.poll()); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): method name has a typo ("tesAppendSamePriority", missing 't'); left
// unchanged. Same shape as tesAppend but all objects share priority 0, so poll order must
// be pure insertion (seqId) order — FIFO among equal priorities.
@Test public void tesAppendSamePriority() throws Exception { for (int i=1; i <= CAPACITY; ++i) { assertTrue(queue.offer(new TestObject(0,i))); assertEquals(i,queue.size()); assertEquals(CAPACITY - i,queue.remainingCapacity()); } assertFalse(queue.offer(new TestObject(0,-1),5,TimeUnit.MILLISECONDS)); for (int i=1; i <= CAPACITY; ++i) { TestObject obj=queue.poll(); assertEquals(i,obj.getSeqId()); assertEquals(CAPACITY - i,queue.size()); assertEquals(i,queue.remainingCapacity()); } assertEquals(null,queue.poll()); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Offers CAPACITY objects with strictly decreasing priority (CAPACITY - i), so each new
// element lands at the head; polling must then return them in reverse insertion order
// (seqId CAPACITY first), with size and remainingCapacity verified after each removal.
@Test public void testPrepend() throws Exception { for (int i=1; i <= CAPACITY; ++i) { assertTrue(queue.offer(new TestObject(CAPACITY - i,i))); assertEquals(i,queue.size()); assertEquals(CAPACITY - i,queue.remainingCapacity()); } for (int i=1; i <= CAPACITY; ++i) { TestObject obj=queue.poll(); assertEquals(CAPACITY - (i - 1),obj.getSeqId()); assertEquals(CAPACITY - i,queue.size()); assertEquals(i,queue.remainingCapacity()); } assertEquals(null,queue.poll()); }

Class: org.apache.hadoop.hbase.util.TestByteBuffUtils

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
// Fills a 100-byte payload (7 longs + 2 bytes) into Multi/SingleByteBuff sources, copies
// each into Multi/SingleByteBuff destinations via put(int, ByteBuff, int, int), and checks
// every source/destination pairing compares equal through ByteBuff.compareTo.
@Test public void testCopyAndCompare() throws Exception { ByteBuffer bb1=ByteBuffer.allocate(50); ByteBuffer bb2=ByteBuffer.allocate(50); MultiByteBuff src=new MultiByteBuff(bb1,bb2); for (int i=0; i < 7; i++) { src.putLong(8l); } src.put((byte)1); src.put((byte)1); ByteBuffer bb3=ByteBuffer.allocate(50); ByteBuffer bb4=ByteBuffer.allocate(50); MultiByteBuff mbbDst=new MultiByteBuff(bb3,bb4); mbbDst.put(0,src,0,100); int compareTo=ByteBuff.compareTo(src,0,100,mbbDst,0,100); assertTrue(compareTo == 0); bb3=ByteBuffer.allocate(100); SingleByteBuff sbbDst=new SingleByteBuff(bb3); src.rewind(); sbbDst.put(0,src,0,100); compareTo=ByteBuff.compareTo(src,0,100,sbbDst,0,100); assertTrue(compareTo == 0); bb3=ByteBuffer.allocate(100); SingleByteBuff sbb=new SingleByteBuff(bb3); for (int i=0; i < 7; i++) { sbb.putLong(8l); } sbb.put((byte)1); sbb.put((byte)1); bb4=ByteBuffer.allocate(100); sbbDst=new SingleByteBuff(bb4); sbbDst.put(0,sbb,0,100); compareTo=ByteBuff.compareTo(sbb,0,100,sbbDst,0,100); assertTrue(compareTo == 0); sbb.rewind(); mbbDst=new MultiByteBuff(bb3,bb4); mbbDst.rewind(); mbbDst.put(0,sbb,0,100); compareTo=ByteBuff.compareTo(sbb,0,100,mbbDst,0,100); assertTrue(compareTo == 0); }

Class: org.apache.hadoop.hbase.util.TestByteBufferArray

InternalCallVerifier BooleanVerifier 
/**
 * A sub-ByteBuff spanning the whole 4 MiB ByteBufferArray must report exactly one
 * remaining byte at position capacity-1 and none after reading it — guarding the case
 * where the end offset lands in the array's last backing buffer.
 */
@Test
public void testAsSubBufferWhenEndOffsetLandInLastBuffer() throws Exception {
  int capacity = 4 * 1024 * 1024;
  ByteBufferArray array = new ByteBufferArray(capacity, false);
  ByteBuff sub = array.asSubByteBuff(0, capacity);
  sub.position(capacity - 1);
  assertTrue(sub.hasRemaining());
  sub.get();
  assertFalse(sub.hasRemaining());
}

Class: org.apache.hadoop.hbase.util.TestByteRangeWithKVSerialization

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Serializes one million tagged KeyValues (each with a sequenceId) into a single
// PositionedByteRange via writeCell, then reads them back with readCell and verifies each
// cell's equality, value bytes, tag bytes, and sequenceId against the original.
@Test public void testWritingAndReadingCells() throws Exception { final byte[] FAMILY=Bytes.toBytes("f1"); final byte[] QUALIFIER=Bytes.toBytes("q1"); final byte[] VALUE=Bytes.toBytes("v"); int kvCount=1000000; List kvs=new ArrayList(kvCount); int totalSize=0; Tag[] tags=new Tag[]{new ArrayBackedTag((byte)1,"tag1")}; for (int i=0; i < kvCount; i++) { KeyValue kv=new KeyValue(Bytes.toBytes(i),FAMILY,QUALIFIER,i,VALUE,tags); kv.setSequenceId(i); kvs.add(kv); totalSize+=kv.getLength() + Bytes.SIZEOF_LONG; } PositionedByteRange pbr=new SimplePositionedMutableByteRange(totalSize); for ( KeyValue kv : kvs) { writeCell(pbr,kv); } PositionedByteRange pbr1=new SimplePositionedMutableByteRange(pbr.getBytes(),0,pbr.getPosition()); for (int i=0; i < kvCount; i++) { KeyValue kv=readCell(pbr1); KeyValue kv1=kvs.get(i); Assert.assertTrue(kv.equals(kv1)); Assert.assertTrue(Bytes.equals(kv.getValueArray(),kv.getValueOffset(),kv.getValueLength(),kv1.getValueArray(),kv1.getValueOffset(),kv1.getValueLength())); Assert.assertTrue(Bytes.equals(kv.getTagsArray(),kv.getTagsOffset(),kv.getTagsLength(),kv1.getTagsArray(),kv1.getTagsOffset(),kv1.getTagsLength())); Assert.assertEquals(kv1.getSequenceId(),kv.getSequenceId()); } }

Class: org.apache.hadoop.hbase.util.TestConnectionCache

InternalCallVerifier EqualityVerifier 
// Builds a ConnectionCache with a 1s chore interval and 5s max idle time, grabs the
// current connection (open), sleeps 7s so the cleanup chore runs past the idle limit, and
// asserts the connection has been closed by the chore.
/** * test for ConnectionCache cleaning expired HConnection */ @Test public void testConnectionChore() throws Exception { UTIL.startMiniCluster(); ConnectionCache cache=new ConnectionCache(UTIL.getConfiguration(),UserProvider.instantiate(UTIL.getConfiguration()),1000,5000); ConnectionCache.ConnectionInfo info=cache.getCurrentConnection(); assertEquals(false,info.connection.isClosed()); Thread.sleep(7000); assertEquals(true,info.connection.isClosed()); UTIL.shutdownMiniCluster(); }

Class: org.apache.hadoop.hbase.util.TestCoprocessorClassLoader

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
// Builds a coprocessor jar, copies it into a tmp path under the local dir, then clears the
// parent-dir lock set and requests a CoprocessorClassLoader: loader creation must succeed
// and must have removed the stale tmp jar left from the "previous" run.
@Test public void testCleanupOldJars() throws Exception { String className="TestCleanupOldJars"; String folder=TEST_UTIL.getDataTestDir().toString(); File jarFile=ClassLoaderTestHelper.buildJar(folder,className,null,ClassLoaderTestHelper.localDirPath(conf)); File tmpJarFile=new File(jarFile.getParent(),"/tmp/" + className + ".test.jar"); if (tmpJarFile.exists()) tmpJarFile.delete(); assertFalse("tmp jar file should not exist",tmpJarFile.exists()); IOUtils.copyBytes(new FileInputStream(jarFile),new FileOutputStream(tmpJarFile),conf,true); assertTrue("tmp jar file should be created",tmpJarFile.exists()); Path path=new Path(jarFile.getAbsolutePath()); ClassLoader parent=TestCoprocessorClassLoader.class.getClassLoader(); CoprocessorClassLoader.parentDirLockSet.clear(); ClassLoader classLoader=CoprocessorClassLoader.getClassLoader(path,parent,"111",conf); assertNotNull("Classloader should be created",classLoader); assertFalse("tmp jar file should be removed",tmpJarFile.exists()); }

Class: org.apache.hadoop.hbase.util.TestCoprocessorScanPolicy

InternalCallVerifier EqualityVerifier 
// Configures the coprocessor scan policy via the "versions" put attribute (limit encoded
// as Bytes.toBytes(2) on the table-name column), writes three versions of the same cell,
// and verifies only 2 versions are visible on get — before flush/compaction, after it, and
// after adding a third newer version.
@Test public void testBaseCases() throws Exception { TableName tableName=TableName.valueOf("baseCases"); if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) { TEST_UTIL.deleteTable(tableName); } Table t=TEST_UTIL.createTable(tableName,F,1); Put p=new Put(R); p.setAttribute("versions",new byte[]{}); p.addColumn(F,tableName.getName(),Bytes.toBytes(2)); t.put(p); long now=EnvironmentEdgeManager.currentTime(); p=new Put(R); p.addColumn(F,Q,now,Q); t.put(p); p=new Put(R); p.addColumn(F,Q,now + 1,Q); t.put(p); Get g=new Get(R); g.setMaxVersions(10); Result r=t.get(g); assertEquals(2,r.size()); TEST_UTIL.flush(tableName); TEST_UTIL.compact(tableName,true); g=new Get(R); g.setMaxVersions(10); r=t.get(g); assertEquals(2,r.size()); p=new Put(R); p.addColumn(F,Q,now + 2,Q); t.put(p); g=new Get(R); g.setMaxVersions(10); r=t.get(g); assertEquals(2,r.size()); t.close(); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Pins the clock with a ManualEnvironmentEdge, configures a 3000ms TTL through the "ttl"
// put attribute, writes two cells timestamped ~2s in the past, verifies both are visible
// before and after flush/compaction, then advances the injected clock by 2s (past the TTL)
// and verifies the get returns nothing; resets the edge on exit.
@Test public void testTTL() throws Exception { TableName tableName=TableName.valueOf("testTTL"); if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) { TEST_UTIL.deleteTable(tableName); } HTableDescriptor desc=new HTableDescriptor(tableName); HColumnDescriptor hcd=new HColumnDescriptor(F).setMaxVersions(10).setTimeToLive(1); desc.addFamily(hcd); TEST_UTIL.getHBaseAdmin().createTable(desc); Table t=TEST_UTIL.getConnection().getTable(tableName); long now=EnvironmentEdgeManager.currentTime(); ManualEnvironmentEdge me=new ManualEnvironmentEdge(); me.setValue(now); EnvironmentEdgeManagerTestHelper.injectEdge(me); long ts=now - 2000; Put p=new Put(R); p.setAttribute("ttl",new byte[]{}); p.addColumn(F,tableName.getName(),Bytes.toBytes(3000L)); t.put(p); p=new Put(R); p.addColumn(F,Q,ts,Q); t.put(p); p=new Put(R); p.addColumn(F,Q,ts + 1,Q); t.put(p); Get g=new Get(R); g.setMaxVersions(10); Result r=t.get(g); assertEquals(2,r.size()); TEST_UTIL.flush(tableName); TEST_UTIL.compact(tableName,true); g=new Get(R); g.setMaxVersions(10); r=t.get(g); assertEquals(2,r.size()); me.setValue(now + 2000); g=new Get(R); g.setMaxVersions(10); r=t.get(g); assertEquals(0,r.size()); t.close(); EnvironmentEdgeManager.reset(); }

Class: org.apache.hadoop.hbase.util.TestCounter

InternalCallVerifier EqualityVerifier 
// For each configured thread count, hammers a fresh Counter with concurrent increment()
// calls via the execute(...) harness and asserts the final total equals
// threadCount * DATA_COUNT — i.e. no lost updates.
@Test public void testIncrement() throws Exception { for ( int threadCount : THREAD_COUNTS) { final Counter counter=new Counter(); execute(new Operation(){ @Override public void execute(){ counter.increment(); } } ,threadCount); Assert.assertEquals(threadCount * (long)DATA_COUNT,counter.get()); } }

InternalCallVerifier EqualityVerifier 
// Same as testIncrement, but each operation also calls get() after increment() so that
// concurrent reads are interleaved with writes; the final total must still be
// threadCount * DATA_COUNT.
@Test public void testIncrementAndGet() throws Exception { for ( int threadCount : THREAD_COUNTS) { final Counter counter=new Counter(); execute(new Operation(){ @Override public void execute(){ counter.increment(); counter.get(); } } ,threadCount); Assert.assertEquals(threadCount * (long)DATA_COUNT,counter.get()); } }

Class: org.apache.hadoop.hbase.util.TestDefaultEnvironmentEdge

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
/**
 * DefaultEnvironmentEdge must track the system clock: its reading is never behind a
 * System.currentTimeMillis() sample taken immediately before it, and it advances across a
 * short sleep.
 */
@Test
public void testGetCurrentTimeUsesSystemClock() {
  DefaultEnvironmentEdge edge = new DefaultEnvironmentEdge();
  long systemTime = System.currentTimeMillis();
  long edgeTime = edge.currentTime();
  // Simplified from (systemTime < edgeTime || systemTime == edgeTime).
  assertTrue("System time must be either the same or less than the edge time",
      systemTime <= edgeTime);
  try {
    Thread.sleep(1);
  } catch (InterruptedException e) {
    fail(e.getMessage());
  }
  long secondEdgeTime = edge.currentTime();
  assertTrue("Second time must be greater than the first", secondEdgeTime > edgeTime);
}

Class: org.apache.hadoop.hbase.util.TestDrainBarrier

InternalCallVerifier BooleanVerifier 
/**
 * Happy-path lifecycle of a DrainBarrier: operations may begin while the barrier is open,
 * and once all of them have ended and the barrier has drained, no new operation may start.
 */
@Test
public void testBeginEndStopWork() throws Exception {
  DrainBarrier barrier = new DrainBarrier();
  // Two operations start and finish while the barrier is open.
  assertTrue(barrier.beginOp());
  assertTrue(barrier.beginOp());
  barrier.endOp();
  barrier.endOp();
  // After draining, further begin attempts must be refused.
  barrier.stopAndDrainOps();
  assertFalse(barrier.beginOp());
}

UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier 
// Starts three ops and ends one; a separate thread calls stopAndDrainOpsOnce() and must
// remain blocked (still alive after a 1s join) while any op is outstanding. Ending the
// remaining ops must unblock it, letting the thread finish within the 30s join.
@Test public void testStopIsBlockedByOps() throws Exception { final DrainBarrier barrier=new DrainBarrier(); barrier.beginOp(); barrier.beginOp(); barrier.beginOp(); barrier.endOp(); Thread stoppingThread=new Thread(new Runnable(){ @Override public void run(){ try { barrier.stopAndDrainOpsOnce(); } catch ( InterruptedException e) { fail("Should not have happened"); } } } ); stoppingThread.start(); barrier.endOp(); stoppingThread.join(1000); assertTrue(stoppingThread.isAlive()); barrier.endOp(); stoppingThread.join(30000); assertFalse(stoppingThread.isAlive()); }

Class: org.apache.hadoop.hbase.util.TestEncryptionTest

UtilityVerifier InternalCallVerifier 
/**
 * The configured cipher (AES by default) must pass the encryption self-test;
 * a bogus cipher name must be rejected.
 */
@Test
public void testTestCipher() {
  Configuration conf = HBaseConfiguration.create();
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  String cipher = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  try {
    EncryptionTest.testEncryption(conf, cipher, null);
  } catch (Exception e) {
    fail("Test for cipher " + cipher + " should have succeeded");
  }
  try {
    EncryptionTest.testEncryption(conf, "foobar", null);
    fail("Test for bogus cipher should have failed");
  } catch (Exception e) {
    // expected: unknown cipher name must be rejected
  }
}

Class: org.apache.hadoop.hbase.util.TestFSHDFSUtils

InternalCallVerifier BooleanVerifier 
/**
 * FSHDFSUtils.isSameHdfs must treat host aliases (localhost vs 127.0.0.1),
 * default ports, and HA nameservice configurations correctly.
 * Skipped on hadoop versions lacking DFSUtil.getNNServiceRpcAddresses (no-HA).
 */
@Test
public void testIsSameHdfs() throws IOException {
  try {
    Class dfsUtilClazz = Class.forName("org.apache.hadoop.hdfs.DFSUtil");
    dfsUtilClazz.getMethod("getNNServiceRpcAddresses", Configuration.class);
  } catch (Exception e) {
    LOG.info("Skip testIsSameHdfs test case because of the no-HA hadoop version.");
    return;
  }
  Configuration conf = HBaseConfiguration.create();
  Path srcPath = new Path("hdfs://localhost:8020/");
  Path desPath = new Path("hdfs://127.0.0.1/");
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = desPath.getFileSystem(conf);
  // Same host via alias, default port: considered the same HDFS.
  assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
  // Same host, different port: different HDFS.
  desPath = new Path("hdfs://127.0.0.1:8070/");
  desFs = desPath.getFileSystem(conf);
  assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
  // Different host, same port: different HDFS.
  desPath = new Path("hdfs://127.0.1.1:8020/");
  desFs = desPath.getFileSystem(conf);
  assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
  // HA nameservice where nn1 matches the source namenode's host:port.
  conf.set("fs.defaultFS", "hdfs://haosong-hadoop");
  conf.set("dfs.nameservices", "haosong-hadoop");
  conf.set("dfs.ha.namenodes.haosong-hadoop", "nn1,nn2");
  conf.set("dfs.client.failover.proxy.provider.haosong-hadoop",
      "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
  conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.0.0.1:8020");
  conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.10.2.1:8000");
  desPath = new Path("/");
  desFs = desPath.getFileSystem(conf);
  assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
  // HA nameservice where no namenode address matches the source's host:port.
  conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.10.2.1:8020");
  conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.0.0.1:8000");
  desPath = new Path("/");
  desFs = desPath.getFileSystem(conf);
  assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
}

InternalCallVerifier BooleanVerifier 
/**
 * Test that isFileClosed makes us recover lease faster.
 * recoverLease is stubbed to fail twice then succeed, but isFileClosed reports
 * true after the second attempt, so recoverDFSFileLease must stop polling
 * after exactly 2 recoverLease calls and 1 isFileClosed call — long before the
 * (deliberately huge) dfs timeout would expire.
 * @throws IOException
 */
@Test(timeout = 30000)
public void testIsFileClosed() throws IOException {
  // Very long timeout: without the isFileClosed shortcut, recovery would keep polling.
  HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 100000);
  CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
  Mockito.when(reporter.progress()).thenReturn(true);
  IsFileClosedDistributedFileSystem dfs = Mockito.mock(IsFileClosedDistributedFileSystem.class);
  Mockito.when(dfs.recoverLease(FILE)).thenReturn(false).thenReturn(false).thenReturn(true);
  Mockito.when(dfs.isFileClosed(FILE)).thenReturn(true);
  assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
  Mockito.verify(dfs, Mockito.times(2)).recoverLease(FILE);
  Mockito.verify(dfs, Mockito.times(1)).isFileClosed(FILE);
}

InternalCallVerifier BooleanVerifier 
/**
 * Test recover lease eventually succeeding.
 * recoverLease is stubbed to fail four times then succeed; the helper must
 * keep retrying (5 calls total), and the elapsed time must exceed three
 * waiting cycles of the configured dfs timeout.
 * @throws IOException
 */
@Test(timeout = 30000)
public void testRecoverLease() throws IOException {
  HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
  CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
  Mockito.when(reporter.progress()).thenReturn(true);
  DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class);
  Mockito.when(dfs.recoverLease(FILE)).thenReturn(false).thenReturn(false).thenReturn(false)
      .thenReturn(false).thenReturn(true);
  assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
  Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
  // NOTE(review): re-reads the timeout with default 61000 although 1000 was
  // set above — the set value is returned, so the bound is 3 * 1000ms.
  assertTrue((EnvironmentEdgeManager.currentTime() - this.startTime)
      > (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
}

Class: org.apache.hadoop.hbase.util.TestFSTableDescriptors

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * createTableDescriptor succeeds once and refuses a second create; repeated
 * updateTableDescriptor calls leave exactly one entry under the test dir and
 * an empty .tmp directory.
 */
@Test
public void testCreateAndUpdate() throws IOException {
  Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate"));
  TableDescriptor descriptor = new TableDescriptor(htd);
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
  assertTrue(fstd.createTableDescriptor(descriptor));
  // A second create for the same table must be refused.
  assertFalse(fstd.createTableDescriptor(descriptor));
  FileStatus[] statuses = fs.listStatus(testdir);
  assertTrue("statuses.length=" + statuses.length, statuses.length == 1);
  for (int round = 0; round < 10; round++) {
    fstd.updateTableDescriptor(descriptor);
  }
  statuses = fs.listStatus(testdir);
  assertTrue(statuses.length == 1);
  // No leftovers may remain in the .tmp working directory.
  Path tmpTableDir = new Path(FSUtils.getTableDir(testdir, htd.getTableName()), ".tmp");
  statuses = fs.listStatus(tmpTableDir);
  assertTrue(statuses.length == 0);
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the FSTableDescriptors cache: four rounds of gets over the same
 * tables, with an update between rounds 2 and 3. Expects invocations to equal
 * count * 4 (one per get) and at least count * 2 cache hits (the second get
 * of each pair should be served from cache).
 */
@Test
public void testHTableDescriptors() throws IOException, InterruptedException {
  final String name = "testHTableDescriptors";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  // Subclass logs cachehits on every get so failures are diagnosable.
  FSTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir) {
    @Override
    public HTableDescriptor get(TableName tablename)
        throws TableExistsException, FileNotFoundException, IOException {
      LOG.info(tablename + ", cachehits=" + this.cachehits);
      return super.get(tablename);
    }
  };
  final int count = 10;
  // Create the table descriptors on disk.
  for (int i = 0; i < count; i++) {
    TableDescriptor htd = new TableDescriptor(new HTableDescriptor(TableName.valueOf(name + i)));
    htds.createTableDescriptor(htd);
  }
  // Round 1 loads from the filesystem; round 2 should hit the cache.
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(TableName.valueOf(name + i)) != null);
  }
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(TableName.valueOf(name + i)) != null);
  }
  // Update every descriptor so cached entries become stale.
  for (int i = 0; i < count; i++) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
    htd.addFamily(new HColumnDescriptor("" + i));
    htds.updateTableDescriptor(new TableDescriptor(htd));
  }
  // Brief pause so updated files are observably newer.
  Thread.sleep(100);
  // Rounds 3 and 4 after the update.
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(TableName.valueOf(name + i)) != null);
  }
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(TableName.valueOf(name + i)) != null);
  }
  assertEquals(count * 4, htds.invocations);
  assertTrue("expected=" + (count * 2) + ", actual=" + htds.cachehits,
      htds.cachehits >= (count * 2));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * An old-format descriptor file (raw HTableDescriptor bytes overwriting the
 * current-format file) must still be readable as a TableDescriptor, and the
 * upgraded on-disk contents must round-trip through TableDescriptor.parseFrom.
 */
@Test
public void testReadingOldHTDFromFS() throws IOException, DeserializationException {
  final String name = "testReadingOldHTDFromFS";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path rootdir = UTIL.getDataTestDir(name);
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  TableDescriptor td = new TableDescriptor(htd);
  Path descriptorFile = fstd.updateTableDescriptor(td);
  // Overwrite the descriptor file with the legacy serialization (HTD bytes only).
  try (FSDataOutputStream out = fs.create(descriptorFile, true)) {
    out.write(htd.toByteArray());
  }
  // A fresh FSTableDescriptors must still read it as an equal descriptor.
  FSTableDescriptors fstd2 = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  TableDescriptor td2 = fstd2.getDescriptor(htd.getTableName());
  assertEquals(td, td2);
  FileStatus descriptorFile2 =
      FSTableDescriptors.getTableInfoPath(fs, fstd2.getTableDir(htd.getTableName()));
  // NOTE(review): buffer is sized from td's current serialization; assumes the
  // on-disk (upgraded) file is at least that long — confirm if formats diverge.
  byte[] buffer = td.toByteArray();
  try (FSDataInputStream in = fs.open(descriptorFile2.getPath())) {
    in.readFully(buffer);
  }
  TableDescriptor td3 = TableDescriptor.parseFrom(buffer);
  assertEquals(td, td3);
}

APIUtilityVerifier InternalCallVerifier NullVerifier 
/**
 * Removing an added descriptor returns it the first time and null thereafter.
 */
@Test
public void testRemoves() throws IOException {
  final String name = "testRemoves";
  FileSystem filesystem = FileSystem.get(UTIL.getConfiguration());
  Path root = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors descriptors = new FSTableDescriptors(UTIL.getConfiguration(), filesystem, root);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  descriptors.add(htd);
  assertNotNull(descriptors.remove(htd.getTableName()));
  // Second remove of the same table must find nothing.
  assertNull(descriptors.remove(htd.getTableName()));
}

InternalCallVerifier EqualityVerifier 
/**
 * getAll must return every user table descriptor plus the meta table's.
 */
@Test
public void testGetAll() throws IOException, InterruptedException {
  final String name = "testGetAll";
  FileSystem filesystem = FileSystem.get(UTIL.getConfiguration());
  Path root = new Path(UTIL.getDataTestDir(), name);
  FSTableDescriptors descriptors =
      new FSTableDescriptorsTest(UTIL.getConfiguration(), filesystem, root);
  final int count = 4;
  // Create the user tables.
  for (int tableIdx = 0; tableIdx < count; tableIdx++) {
    HTableDescriptor htd = new HTableDescriptor(name + tableIdx);
    descriptors.createTableDescriptor(htd);
  }
  // Plus the meta table's descriptor.
  HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
  descriptors.createTableDescriptor(htd);
  assertEquals("getAll() didn't return all TableDescriptors, expected: " + (count + 1)
      + " got: " + descriptors.getAll().size(), count + 1, descriptors.getAll().size());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A second createTableDescriptor for an unchanged descriptor is refused, but
 * once the wrapped HTableDescriptor is mutated the create succeeds again
 * (acting as an update), leaving an empty .tmp dir and the new contents on disk.
 */
@Test
public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
  Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
  HTableDescriptor htd = new HTableDescriptor(
      TableName.valueOf("testCreateTableDescriptorUpdatesIfThereExistsAlready"));
  TableDescriptor td = new TableDescriptor(htd);
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
  assertTrue(fstd.createTableDescriptor(td));
  assertFalse(fstd.createTableDescriptor(td));
  // td aliases htd, so mutating htd changes what td serializes to.
  htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
  assertTrue(fstd.createTableDescriptor(td));
  Path tableDir = fstd.getTableDir(htd.getTableName());
  Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
  // The temporary working dir must be empty after the create.
  FileStatus[] statuses = fs.listStatus(tmpTableDir);
  assertTrue(statuses.length == 0);
  assertEquals(td, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Table-info file names carry a zero-padded sequence id, and the comparator
 * orders higher sequence ids first (a bare, id-less name sorts last).
 */
@Test
public void testFormatTableInfoSequenceId() {
  Path seq0 = assertWriteAndReadSequenceId(0);
  // Sequence id 0 renders as WIDTH_OF_SEQUENCE_ID zero characters.
  StringBuilder zeros = new StringBuilder();
  for (int idx = 0; idx < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; idx++) {
    zeros.append("0");
  }
  assertEquals(FSTableDescriptors.TABLEINFO_FILE_PREFIX + "." + zeros.toString(), seq0.getName());
  Path seq2 = assertWriteAndReadSequenceId(2);
  Path seq10000 = assertWriteAndReadSequenceId(10000);
  // Compare against a file name that carries no sequence id at all.
  Path bare = new Path(seq0.getParent(), FSTableDescriptors.TABLEINFO_FILE_PREFIX);
  FileStatus bareStatus = new FileStatus(0, false, 0, 0, 0, bare);
  FileStatus status0 = new FileStatus(0, false, 0, 0, 0, seq0);
  FileStatus status2 = new FileStatus(0, false, 0, 0, 0, seq2);
  FileStatus status10000 = new FileStatus(0, false, 0, 0, 0, seq10000);
  Comparator comparator = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
  assertTrue(comparator.compare(bareStatus, status0) > 0);
  assertTrue(comparator.compare(status0, status2) > 0);
  assertTrue(comparator.compare(status2, status10000) > 0);
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * A caching FSTableDescriptors and a non-caching one over the same root dir
 * must agree: same table count and equal descriptors, including after the
 * meta table's descriptor is added through the non-cached instance.
 */
@Test
public void testCacheConsistency() throws IOException, InterruptedException {
  final String name = "testCacheConsistency";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  FSTableDescriptors chtds = new FSTableDescriptorsTest(UTIL.getConfiguration(), fs, rootdir);
  // NOTE(review): the two extra booleans presumably disable fs-readonly and
  // the cache — confirm against FSTableDescriptorsTest's constructor.
  FSTableDescriptors nonchtds = new FSTableDescriptorsTest(UTIL.getConfiguration(), fs, rootdir, false, false);
  final int count = 10;
  // Create all tables through the non-cached instance.
  for (int i = 0; i < count; i++) {
    HTableDescriptor htd = new HTableDescriptor(name + i);
    nonchtds.createTableDescriptor(htd);
  }
  // The cached instance must see all of them.
  for (int i = 0; i < count; i++) {
    assertTrue(chtds.get(TableName.valueOf(name + i)) != null);
  }
  assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
  // Add the meta descriptor behind the cached instance's back; sizes must still agree.
  HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
  nonchtds.createTableDescriptor(htd);
  assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
  // Every descriptor must compare equal across the two instances.
  for (Map.Entry entry : nonchtds.getAll().entrySet()) {
    String t = (String) entry.getKey();
    HTableDescriptor nchtd = (HTableDescriptor) entry.getValue();
    assertTrue("expected " + htd.toString() + " got: "
        + chtds.get(TableName.valueOf(t)).toString(),
        (nchtd.equals(chtds.get(TableName.valueOf(t)))));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Every updateTableDescriptor must advance the table-info sequence id by
 * exactly one and delete the previous descriptor file.
 */
@Test
public void testSequenceIdAdvancesOnTableInfo() throws IOException {
  Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testSequenceidAdvancesOnTableInfo"));
  TableDescriptor td = new TableDescriptor(htd);
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
  Path p0 = fstd.updateTableDescriptor(td);
  int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
  Path p1 = fstd.updateTableDescriptor(td);
  // The previous file must be cleaned up on every update.
  assertTrue(!fs.exists(p0));
  int i1 = FSTableDescriptors.getTableInfoSequenceId(p1);
  assertTrue(i1 == i0 + 1);
  Path p2 = fstd.updateTableDescriptor(td);
  assertTrue(!fs.exists(p1));
  int i2 = FSTableDescriptors.getTableInfoSequenceId(p2);
  assertTrue(i2 == i1 + 1);
  // A freshly wrapped TableDescriptor for the same htd behaves identically.
  td = new TableDescriptor(htd);
  Path p3 = fstd.updateTableDescriptor(td);
  assertTrue(!fs.exists(p2));
  int i3 = FSTableDescriptors.getTableInfoSequenceId(p3);
  assertTrue(i3 == i2 + 1);
  // Round-trip: reading back yields an equal descriptor.
  TableDescriptor descriptor = fstd.getDescriptor(htd.getTableName());
  assertEquals(descriptor, td);
}

InternalCallVerifier NullVerifier 
/**
 * Looking up a table that was never created must yield null, not throw.
 */
@Test
public void testNoSuchTable() throws IOException {
  final String name = "testNoSuchTable";
  FileSystem filesystem = FileSystem.get(UTIL.getConfiguration());
  Path root = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors descriptors = new FSTableDescriptors(UTIL.getConfiguration(), filesystem, root);
  assertNull("There shouldn't be any HTD for this table",
      descriptors.get(TableName.valueOf("NoSuchTable")));
}

IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * With caching disabled every get must reach the filesystem: cachehits stays
 * 0 and invocations equals the total number of get calls (count * 4).
 */
@Test
public void testHTableDescriptorsNoCache() throws IOException, InterruptedException {
  final String name = "testHTableDescriptorsNoCache";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  // Last boolean disables the cache in the test subclass.
  FSTableDescriptors htds = new FSTableDescriptorsTest(UTIL.getConfiguration(), fs, rootdir, false, false);
  final int count = 10;
  for (int i = 0; i < count; i++) {
    HTableDescriptor htd = new HTableDescriptor(name + i);
    htds.createTableDescriptor(htd);
  }
  // % binds tighter than +, so this alternates between tables 0 and 1 for
  // 2*count gets — presumably intentional re-reads of the same two files.
  for (int i = 0; i < 2 * count; i++) {
    assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i % 2)));
  }
  // Update every table with a new column family.
  for (int i = 0; i < count; i++) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
    htd.addFamily(new HColumnDescriptor("" + i));
    htds.updateTableDescriptor(new TableDescriptor(htd));
  }
  // Two gets per table here — the second verifies the update landed.
  for (int i = 0; i < count; i++) {
    assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i)));
    assertTrue("Column Family " + i + " missing",
        htds.get(TableName.valueOf(name + i)).hasFamily(Bytes.toBytes("" + i)));
  }
  assertEquals(count * 4, htds.invocations);
  assertEquals("expected=0, actual=" + htds.cachehits, 0, htds.cachehits);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A descriptor created through FSTableDescriptors must be readable back via
 * the static getTableDescriptorFromFs and compare equal.
 */
@Test
public void testReadingHTDFromFS() throws IOException {
  final String name = "testReadingHTDFromFS";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  TableDescriptor td = new TableDescriptor(htd);
  Path rootdir = UTIL.getDataTestDir(name);
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  fstd.createTableDescriptor(td);
  TableDescriptor td2 =
      FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
  // assertEquals (used by the sibling tests) reports both values on failure,
  // unlike the former assertTrue(td.equals(td2)).
  assertEquals(td, td2);
}

Class: org.apache.hadoop.hbase.util.TestFSUtils

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * renameAndSetModifyTime must move the file and stamp the destination's
 * modification time from the injected ManualEnvironmentEdge.
 * Fix: the mini DFS cluster is now shut down in a finally block so it is
 * released even when an assertion fails (previously shutdown only ran on the
 * success path).
 */
@Test
public void testRenameAndSetModifyTime() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  try {
    assertTrue(FSUtils.isHDFS(conf));
    FileSystem fs = FileSystem.get(conf);
    Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
    String file = UUID.randomUUID().toString();
    Path p = new Path(testDir, file);
    FSDataOutputStream out = fs.create(p);
    out.close();
    assertTrue("The created file should be present", FSUtils.isExists(fs, p));
    // Pick a timestamp guaranteed not to be the file's current mtime.
    long expect = System.currentTimeMillis() + 1000;
    assertNotEquals(expect, fs.getFileStatus(p).getModificationTime());
    ManualEnvironmentEdge mockEnv = new ManualEnvironmentEdge();
    mockEnv.setValue(expect);
    EnvironmentEdgeManager.injectEdge(mockEnv);
    try {
      String dstFile = UUID.randomUUID().toString();
      Path dst = new Path(testDir, dstFile);
      assertTrue(FSUtils.renameAndSetModifyTime(fs, p, dst));
      assertFalse("The moved file should not be present", FSUtils.isExists(fs, p));
      assertTrue("The dst file should be present", FSUtils.isExists(fs, dst));
      // The destination's mtime must come from the injected edge.
      assertEquals(expect, fs.getFileStatus(dst).getModificationTime());
    } finally {
      EnvironmentEdgeManager.reset();
    }
  } finally {
    cluster.shutdown();
  }
}

InternalCallVerifier BooleanVerifier 
/**
 * Test path compare and prefix checking.
 * isMatchingTail only matches absolute/qualified tails; isStartingWithPath
 * checks string prefixes against a root.
 * @throws IOException
 */
@Test
public void testMatchingTail() throws IOException {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  final FileSystem fs = testUtil.getTestFileSystem();
  Path root = testUtil.getDataTestDir();
  assertTrue(root.depth() > 1);
  Path relative = new Path("a", "b");
  Path absolute = new Path(root, relative);
  Path qualified = fs.makeQualified(absolute);
  // A relative tail never matches an absolute path.
  assertFalse(FSUtils.isMatchingTail(absolute, relative));
  assertFalse(FSUtils.isMatchingTail(absolute, relative.toString()));
  assertTrue(FSUtils.isStartingWithPath(root, absolute.toString()));
  assertTrue(FSUtils.isStartingWithPath(qualified, absolute.toString()));
  assertFalse(FSUtils.isStartingWithPath(root, relative.toString()));
  assertFalse(FSUtils.isMatchingTail(qualified, relative));
  // Absolute and qualified forms of the same path do match.
  assertTrue(FSUtils.isMatchingTail(qualified, absolute));
  assertTrue(FSUtils.isMatchingTail(qualified, absolute.toString()));
  assertTrue(FSUtils.isMatchingTail(qualified, fs.makeQualified(absolute)));
  assertTrue(FSUtils.isStartingWithPath(root, qualified.toString()));
  // Unrelated paths never match in either direction.
  assertFalse(FSUtils.isMatchingTail(absolute, new Path("x")));
  assertFalse(FSUtils.isMatchingTail(new Path("x"), absolute));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Ugly test that ensures we can get at the hedged read counters in dfsclient.
 * Does a bit of preading with hedged reads enabled using code taken from hdfs TestPread.
 * @throws Exception
 */
@Test
public void testDFSHedgedReadMetrics() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  // Enable hedged reads: pool of 5 threads, hedge immediately (0ms threshold).
  conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE, 5);
  conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS, 0);
  // Small blocks/prefetch so the pread touches many blocks.
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  // Metrics must be reachable and start at zero.
  DFSHedgedReadMetrics metrics = FSUtils.getDFSHedgedReadMetrics(conf);
  assertEquals(0, metrics.getHedgedReadOps());
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path p = new Path("preadtest.dat");
    DFSTestUtil.createFile(fileSys, p, 12 * blockSize, 12 * blockSize, blockSize, (short) 3, seed);
    pReadFile(fileSys, p);
    cleanupFile(fileSys, p);
    // With a 0ms threshold, the preads must have hedged at least once.
    assertTrue(metrics.getHedgedReadOps() > 0);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * FSUtils.create/delete/isExists round trip with umask-derived permissions.
 * Fix: the assertFalse after deleting p1 carried the message "The created
 * file should be present", which is the opposite of what that assertion
 * checks; corrected to describe the expected absence.
 */
@Test
public void testDeleteAndExists() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  FileSystem fs = FileSystem.get(conf);
  FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
  String file = UUID.randomUUID().toString();
  Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file);
  Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file);
  try {
    FSDataOutputStream out = FSUtils.create(conf, fs, p, perms, null);
    out.close();
    assertTrue("The created file should be present", FSUtils.isExists(fs, p));
    // Non-recursive delete of a plain file.
    FSUtils.delete(fs, p, false);
    FSDataOutputStream out1 = FSUtils.create(conf, fs, p1, perms, null);
    out1.close();
    // Recursive delete.
    FSUtils.delete(fs, p1, true);
    assertFalse("The deleted file should not be present", FSUtils.isExists(fs, p1));
  } finally {
    FSUtils.delete(fs, p, true);
    FSUtils.delete(fs, p1, true);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * DATA_FILE_UMASK handling: FS defaults when disabled, full-rwx baseline when
 * enabled with no mask, owner-only 700 under an explicit 077 mask — verified
 * both from getFilePermissions and on a file actually written to disk.
 */
@Test
public void testPermMask() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);
  // Umask disabled: filesystem defaults apply.
  FsPermission noUmaskPerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
  assertEquals(FsPermission.getFileDefault(), noUmaskPerm);
  // Umask enabled without an explicit mask: full rwx baseline.
  conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  FsPermission baselinePerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
  assertEquals(new FsPermission(FSUtils.FULL_RWX_PERMISSIONS), baselinePerm);
  // Explicit 077 mask yields owner-only access.
  conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
  FsPermission maskedPerm = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
  assertEquals(new FsPermission("700"), maskedPerm);
  Path scratch = new Path("target" + File.separator + UUID.randomUUID().toString());
  try {
    FSDataOutputStream out = FSUtils.create(conf, fs, scratch, maskedPerm, null);
    out.close();
    FileStatus stat = fs.getFileStatus(scratch);
    assertEquals(new FsPermission("700"), stat.getPermission());
  } finally {
    fs.delete(scratch, true);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getVersion returns null before a version file exists; a version written
 * with writeUTF round-trips exactly and repeatably; checkVersion accepts the
 * current FILE_SYSTEM_VERSION without throwing.
 */
@Test
public void testVersion() throws DeserializationException, IOException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  final FileSystem fs = htu.getTestFileSystem();
  final Path rootdir = htu.getDataTestDir();
  // No version file written yet.
  assertNull(FSUtils.getVersion(fs, rootdir));
  // Write the current version by hand, UTF-encoded.
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FSDataOutputStream s = fs.create(versionFile);
  final String version = HConstants.FILE_SYSTEM_VERSION;
  s.writeUTF(version);
  s.close();
  assertTrue(fs.exists(versionFile));
  FileStatus[] status = fs.listStatus(versionFile);
  assertNotNull(status);
  assertTrue(status.length > 0);
  String newVersion = FSUtils.getVersion(fs, rootdir);
  assertEquals(version.length(), newVersion.length());
  assertEquals(version, newVersion);
  // Reading again must be stable.
  assertEquals(version, FSUtils.getVersion(fs, rootdir));
  // Must not throw for the current version.
  FSUtils.checkVersion(fs, rootdir, true);
}

InternalCallVerifier BooleanVerifier 
/**
 * FSUtils.isHDFS is false against the local filesystem and true once a mini
 * DFS cluster backs the configuration; append support is reported on HDFS.
 */
@Test
public void testIsHDFS() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  testUtil.getConfiguration().setBoolean("dfs.support.append", false);
  assertFalse(FSUtils.isHDFS(testUtil.getConfiguration()));
  testUtil.getConfiguration().setBoolean("dfs.support.append", true);
  MiniDFSCluster cluster = null;
  try {
    cluster = testUtil.startMiniDFSCluster(1);
    assertTrue(FSUtils.isHDFS(testUtil.getConfiguration()));
    assertTrue(FSUtils.isAppendSupported(testUtil.getConfiguration()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FSUtils.computeHDFSBlocksDistribution must report correct per-host block
 * weights across three mini-cluster scenarios. Each scenario polls for up to
 * ~2s because datanode block reports arrive asynchronously.
 */
@Test
public void testcomputeHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  final int DEFAULT_BLOCK_SIZE = 1024;
  htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  MiniDFSCluster cluster = null;
  Path testFile = null;
  try {
    // Scenario 1: 3 hosts — the assertion requires every host's weight to
    // equal the unique-blocks total, i.e. every host holds every block.
    String hosts[] = new String[] { "host1", "host2", "host3" };
    cluster = htu.startMiniDFSCluster(hosts);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    testFile = new Path("/test1.txt");
    WriteDataToHDFS(fs, testFile, 2 * DEFAULT_BLOCK_SIZE);
    final long maxTime = System.currentTimeMillis() + 2000;
    boolean ok;
    do {
      ok = true;
      FileStatus status = fs.getFileStatus(testFile);
      HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      long uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();
      for (String host : hosts) {
        long weight = blocksDistribution.getWeight(host);
        ok = (ok && uniqueBlocksTotalWeight == weight);
      }
    } while (!ok && System.currentTimeMillis() < maxTime);
    assertTrue(ok);
  } finally {
    htu.shutdownMiniDFSCluster();
  }
  try {
    // Scenario 2: 4 hosts — the top host must carry the full unique-blocks
    // weight even though not every host can hold every block.
    String hosts[] = new String[] { "host1", "host2", "host3", "host4" };
    cluster = htu.startMiniDFSCluster(hosts);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    testFile = new Path("/test2.txt");
    WriteDataToHDFS(fs, testFile, 3 * DEFAULT_BLOCK_SIZE);
    final long maxTime = System.currentTimeMillis() + 2000;
    long weight;
    long uniqueBlocksTotalWeight;
    do {
      FileStatus status = fs.getFileStatus(testFile);
      HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();
      String tophost = blocksDistribution.getTopHosts().get(0);
      weight = blocksDistribution.getWeight(tophost);
    } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
    assertTrue(uniqueBlocksTotalWeight == weight);
  } finally {
    htu.shutdownMiniDFSCluster();
  }
  try {
    // Scenario 3: single-block file — exactly 3 hosts must end up serving it.
    String hosts[] = new String[] { "host1", "host2", "host3", "host4" };
    cluster = htu.startMiniDFSCluster(hosts);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    testFile = new Path("/test3.txt");
    WriteDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);
    final long maxTime = System.currentTimeMillis() + 2000;
    HDFSBlocksDistribution blocksDistribution;
    do {
      FileStatus status = fs.getFileStatus(testFile);
      blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
    } while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
    assertEquals("Wrong number of hosts distributing blocks.", 3,
        blocksDistribution.getTopHosts().size());
  } finally {
    htu.shutdownMiniDFSCluster();
  }
}

Class: org.apache.hadoop.hbase.util.TestFastLongHistogram

IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier 
/**
 * Feeds a bimodal distribution (90% in [0,100), 10% in [1000,1100)) through
 * ten add/reset cycles and checks the quantiles of the first cycle fall in
 * the expected windows.
 * NOTE(review): only cycle n == 0 is asserted — later cycles just print their
 * quantiles; confirm whether post-adaptation cycles were meant to be checked.
 */
@Test
public void testAdaptionOfChange() {
  // Histogram initially configured for the [0,100] range only.
  FastLongHistogram hist = new FastLongHistogram(100, 0, 100);
  Random rand = new Random();
  for (int n = 0; n < 10; n++) {
    for (int i = 0; i < 900; i++) {
      hist.add(rand.nextInt(100), 1);
    }
    for (int i = 0; i < 100; i++) {
      hist.add(1000 + rand.nextInt(100), 1);
    }
    long[] vals = hist.getQuantiles(new double[] { 0.25, 0.75, 0.95 });
    System.out.println(Arrays.toString(vals));
    if (n == 0) {
      Assert.assertTrue("Out of possible value", vals[0] >= 0 && vals[0] <= 50);
      Assert.assertTrue("Out of possible value", vals[1] >= 50 && vals[1] <= 100);
      Assert.assertTrue("Out of possible value", vals[2] >= 900 && vals[2] <= 1100);
    }
    hist.reset();
  }
}

Class: org.apache.hadoop.hbase.util.TestHBaseFsckEncryption

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes data into an encrypted table, verifies every store file is encrypted
 * with the configured CF key, then runs hbck's HFile quarantine and expects a
 * completely clean report.
 * Fix: the assertEquals calls passed (actual, expected); swapped to JUnit's
 * (expected, actual) order so failure messages report correctly.
 */
@Test
public void testFsckWithEncryption() throws Exception {
  Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
  try {
    // Write a small grid of rows so several store files get flushed.
    byte[] values = { 'A', 'B', 'C', 'D' };
    for (int i = 0; i < values.length; i++) {
      for (int j = 0; j < values.length; j++) {
        Put put = new Put(new byte[] { values[i], values[j] });
        put.addColumn(Bytes.toBytes("cf"), new byte[] {}, new byte[] { values[i], values[j] });
        table.put(put);
      }
    }
  } finally {
    table.close();
  }
  TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
  final List paths = findStorefilePaths(htd.getTableName());
  assertTrue(paths.size() > 0);
  // Every store file must carry the configured column-family key.
  for (Path path : paths) {
    assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(cfKey.getEncoded(), extractHFileKey(path)));
  }
  HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, htd.getTableName());
  assertEquals(0, res.getRetCode());
  HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
  assertEquals(0, hfcc.getCorrupted().size());
  assertEquals(0, hfcc.getFailures().size());
  assertEquals(0, hfcc.getQuarantined().size());
  assertEquals(0, hfcc.getMissing().size());
}

Class: org.apache.hadoop.hbase.util.TestHBaseFsckMOB

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * This creates a table and then corrupts a mob file. Hbck should quarantine the file.
 * Fix: the assertEquals calls passed (actual, expected); swapped to JUnit's
 * (expected, actual) order so failure messages report correctly.
 */
@Test(timeout = 180000)
public void testQuarantineCorruptMobFile() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  try {
    setupMobTable(table);
    assertEquals(ROWKEYS.length, countRows());
    admin.flush(table);
    FileSystem fs = FileSystem.get(conf);
    Path mobFile = getFlushedMobFile(fs, table);
    admin.disableTable(table);
    // Create a truncated (corrupt) copy of the mob file alongside it.
    String corruptMobFile = createMobFileName(mobFile.getName());
    Path corrupt = new Path(mobFile.getParent(), corruptMobFile);
    TestHFile.truncateFile(fs, mobFile, corrupt);
    LOG.info("Created corrupted mob file " + corrupt);
    HBaseFsck.debugLsr(conf, FSUtils.getRootDir(conf));
    HBaseFsck.debugLsr(conf, MobUtils.getMobHome(conf));
    admin.enableTable(table);
    HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, table);
    assertEquals(0, res.getRetCode());
    HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
    // Regular hfiles are untouched; only the corrupt mob file is quarantined.
    assertEquals(4, hfcc.getHFilesChecked());
    assertEquals(0, hfcc.getCorrupted().size());
    assertEquals(0, hfcc.getFailures().size());
    assertEquals(0, hfcc.getQuarantined().size());
    assertEquals(0, hfcc.getMissing().size());
    assertEquals(5, hfcc.getMobFilesChecked());
    assertEquals(1, hfcc.getCorruptedMobFiles().size());
    assertEquals(0, hfcc.getFailureMobFiles().size());
    assertEquals(1, hfcc.getQuarantinedMobFiles().size());
    assertEquals(0, hfcc.getMissedMobFiles().size());
    String quarantinedMobFile = hfcc.getQuarantinedMobFiles().iterator().next().getName();
    assertEquals(corruptMobFile, quarantinedMobFile);
  } finally {
    cleanupTable(table);
  }
}

Class: org.apache.hadoop.hbase.util.TestHBaseFsckOneRS

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * This creates a table and then corrupts an hfile. Hbck should quarantine the file.
 * Fix: the assertEquals calls passed (actual, expected); swapped to JUnit's
 * (expected, actual) order so failure messages report correctly.
 */
@Test(timeout = 180000)
public void testQuarantineCorruptHFile() throws Exception {
  TableName table = TableName.valueOf(name.getMethodName());
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    admin.flush(table);
    FileSystem fs = FileSystem.get(conf);
    Path hfile = getFlushedHFile(fs, table);
    admin.disableTable(table);
    // Create a truncated (corrupt) copy of the flushed hfile.
    Path corrupt = new Path(hfile.getParent(), "deadbeef");
    TestHFile.truncateFile(fs, hfile, corrupt);
    LOG.info("Created corrupted file " + corrupt);
    HBaseFsck.debugLsr(conf, FSUtils.getRootDir(conf));
    HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, table);
    assertEquals(0, res.getRetCode());
    HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
    // Exactly one file is corrupt, and that same file must be quarantined.
    assertEquals(5, hfcc.getHFilesChecked());
    assertEquals(1, hfcc.getCorrupted().size());
    assertEquals(0, hfcc.getFailures().size());
    assertEquals(1, hfcc.getQuarantined().size());
    assertEquals(0, hfcc.getMissing().size());
    admin.enableTable(table);
  } finally {
    cleanupTable(table);
  }
}

IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Merges two regions and verifies hbck reports a consistent cluster afterwards.
 * (Removed the dead {@code Table meta} local: it was initialized to null, never
 * assigned, and only passed to IOUtils.closeQuietly — a guaranteed no-op.)
 */
@Test(timeout=180000)
public void testHbckAfterRegionMerge() throws Exception {
  TableName table = TableName.valueOf("testMergeRegionFilesInHdfs");
  try {
    // Stop the catalog janitor so it does not clean up merge artifacts mid-test.
    TEST_UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false);
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      admin.flush(table);
      HRegionInfo region1 = rl.getRegionLocation(Bytes.toBytes("A")).getRegionInfo();
      HRegionInfo region2 = rl.getRegionLocation(Bytes.toBytes("B")).getRegionInfo();
      int regionCountBeforeMerge = rl.getAllRegionLocations().size();
      assertNotEquals(region1, region2);
      admin.mergeRegions(region1.getEncodedNameAsBytes(), region2.getEncodedNameAsBytes(), false);
      // Poll (up to 30s) until the region count drops, i.e. the merge completed.
      long timeout = System.currentTimeMillis() + 30 * 1000;
      while (true) {
        if (rl.getAllRegionLocations().size() < regionCountBeforeMerge) {
          break;
        } else if (System.currentTimeMillis() > timeout) {
          fail("Time out waiting on region " + region1.getEncodedName() + " and "
              + region2.getEncodedName() + " be merged");
        }
        Thread.sleep(10);
      }
      // No rows lost, and hbck sees a consistent cluster after the merge.
      assertEquals(ROWKEYS.length, countRows());
      HBaseFsck hbck = doFsck(conf, false);
      assertNoErrors(hbck);
    }
  } finally {
    TEST_UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(true);
    cleanupTable(table);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test -fixHdfsHoles doesn't work with -noHdfsChecking option,
 * and -noHdfsChecking can't detect orphan Hdfs region.
 */
@Test(timeout=180000)
public void testFixHdfsHolesNotWorkingWithNoHdfsChecking() throws Exception {
  TableName table=TableName.valueOf("testFixHdfsHolesNotWorkingWithNoHdfsChecking");
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length,countRows());
    // Knock out the [A,B) region to manufacture an inconsistency.
    // NOTE(review): the boolean flags to deleteRegion are positional; their
    // exact semantics are not visible in this file — confirm against the helper.
    admin.disableTable(table);
    deleteRegion(conf,tbl.getTableDescriptor(),Bytes.toBytes("A"),Bytes.toBytes("B"),
        true,true,false,true,HRegionInfo.DEFAULT_REPLICA_ID);
    admin.enableTable(table);
    // Create an overlapping region [A2,B) and wait for it to come online.
    HRegionInfo hriOverlap=createRegion(tbl.getTableDescriptor(),Bytes.toBytes("A2"),Bytes.toBytes("B"));
    TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap);
    TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().waitForAssignment(hriOverlap);
    ServerName server=regionStates.getRegionServerOfRegion(hriOverlap);
    TEST_UTIL.assertRegionOnServer(hriOverlap,server,REGION_ONLINE_TIMEOUT);
    // A full hbck (with HDFS checking) sees all three problems.
    HBaseFsck hbck=doFsck(conf,false);
    assertErrors(hbck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.ORPHAN_HDFS_REGION,
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    // With HDFS checking disabled, only the hole in the region chain is visible.
    HBaseFsck fsck=new HBaseFsck(conf,hbfsckExecutorService);
    fsck.connect();
    HBaseFsck.setDisplayFullReport();
    fsck.setTimeLag(0);
    fsck.setCheckHdfs(false);
    fsck.onlineHbck();
    assertErrors(fsck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    fsck.close();
    // The HDFS fix options must be no-ops when HDFS checking is off:
    // no rerun is requested and the hole remains.
    fsck=new HBaseFsck(conf,hbfsckExecutorService);
    fsck.connect();
    HBaseFsck.setDisplayFullReport();
    fsck.setTimeLag(0);
    fsck.setCheckHdfs(false);
    fsck.setFixHdfsHoles(true);
    fsck.setFixHdfsOverlaps(true);
    fsck.setFixHdfsOrphans(true);
    fsck.onlineHbck();
    assertFalse(fsck.shouldRerun());
    assertErrors(fsck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    fsck.close();
  } finally {
    // The table may have been left disabled by an earlier failure.
    if (admin.isTableDisabled(table)) {
      admin.enableTable(table);
    }
    cleanupTable(table);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * The region is not deployed when the table is disabled.
 * Force-opens a region of a disabled table and verifies hbck flags and fixes it.
 */
@Test(timeout=180000)
public void testRegionShouldNotBeDeployed() throws Exception {
  TableName table = TableName.valueOf("tableRegionShouldNotBeDeployed");
  try {
    LOG.info("Starting testRegionShouldNotBeDeployed.");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    assertTrue(cluster.waitForActiveAndReadyMaster());
    byte[][] SPLIT_KEYS = new byte[][]{new byte[0], Bytes.toBytes("aaa"),
        Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd")};
    HTableDescriptor htdDisabled = new HTableDescriptor(table);
    htdDisabled.addFamily(new HColumnDescriptor(FAM));
    // Write the table descriptor and create its regions directly in meta.
    FSTableDescriptors fstd = new FSTableDescriptors(conf);
    fstd.createTableDescriptor(htdDisabled);
    // Typed list (was a raw List, on which remove(0) returns Object and the
    // HRegionInfo assignment below does not compile).
    List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(conf, htdDisabled, SPLIT_KEYS);
    HRegionServer hrs = cluster.getRegionServer(0);
    // Cycle the table so the regions materialize, then leave it disabled.
    admin.disableTable(table);
    admin.enableTable(table);
    admin.disableTable(table);
    // Force-open one region of the disabled table on a region server.
    HRegionInfo region = disabledRegions.remove(0);
    byte[] regionName = region.getRegionName();
    // Sanity: the region is not deployed anywhere before we force it open.
    assertTrue(cluster.getServerWith(regionName) == -1);
    HRegion r = HRegion.openHRegion(region, htdDisabled, hrs.getWAL(region), conf);
    hrs.addToOnlineRegions(r);
    // hbck must flag the wrongly-deployed region, and a fix run must repair it.
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.SHOULD_NOT_BE_DEPLOYED});
    doFsck(conf, true);
    assertNoErrors(doFsck(conf, false));
  } finally {
    admin.enableTable(table);
    cleanupTable(table);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test -noHdfsChecking option can detect and fix assignments issue.
 */
@Test(timeout=180000)
public void testFixAssignmentsAndNoHdfsChecking() throws Exception {
  TableName table=TableName.valueOf("testFixAssignmentsAndNoHdfsChecking");
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length,countRows());
    // Unassign the [A,B) region to create a NOT_DEPLOYED hole.
    // NOTE(review): deleteRegion flag semantics are not visible in this file.
    deleteRegion(conf,tbl.getTableDescriptor(),Bytes.toBytes("A"),Bytes.toBytes("B"),
        true,false,false,false,HRegionInfo.DEFAULT_REPLICA_ID);
    // A full hbck sees the undeployed region and the resulting chain hole.
    HBaseFsck hbck=doFsck(conf,false);
    assertErrors(hbck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_DEPLOYED,
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    // The same problems are detectable even with HDFS checking turned off.
    HBaseFsck fsck=new HBaseFsck(conf,hbfsckExecutorService);
    fsck.connect();
    HBaseFsck.setDisplayFullReport();
    fsck.setTimeLag(0);
    fsck.setCheckHdfs(false);
    fsck.onlineHbck();
    assertErrors(fsck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_DEPLOYED,
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    fsck.close();
    // -fixAssignments also works without HDFS checking: it requests a rerun,
    // after which the cluster is clean and no rows were lost.
    fsck=new HBaseFsck(conf,hbfsckExecutorService);
    fsck.connect();
    HBaseFsck.setDisplayFullReport();
    fsck.setTimeLag(0);
    fsck.setCheckHdfs(false);
    fsck.setFixAssignments(true);
    fsck.onlineHbck();
    assertTrue(fsck.shouldRerun());
    fsck.onlineHbck();
    assertNoErrors(fsck);
    assertEquals(ROWKEYS.length,countRows());
    fsck.close();
  } finally {
    cleanupTable(table);
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Split crashed after write to hbase:meta finished for the parent region, but
 * failed to write daughters (pre HBASE-7721 codebase)
 */
@Test(timeout=75000)
public void testSplitDaughtersNotInMeta() throws Exception {
  TableName table = TableName.valueOf("testSplitdaughtersNotInMeta");
  Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    admin.flush(table);
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      HRegionLocation location = rl.getRegionLocation(Bytes.toBytes("B"));
      HRegionInfo hri = location.getRegionInfo();
      // Keep the catalog janitor from cleaning the parent while we tamper with meta.
      admin.enableCatalogJanitor(false);
      byte[] regionName = location.getRegionInfo().getRegionName();
      admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
      TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true);
      // Typed pair (was a raw PairOfSameType, whose raw getFirst() returns
      // Object and makes the getStartKey() calls below fail to compile).
      PairOfSameType<HRegionInfo> daughters =
          MetaTableAccessor.getDaughterRegions(meta.get(new Get(regionName)));
      ServerName firstSN = rl.getRegionLocation(daughters.getFirst().getStartKey()).getServerName();
      ServerName secondSN = rl.getRegionLocation(daughters.getSecond().getStartKey()).getServerName();
      undeployRegion(connection, firstSN, daughters.getFirst());
      undeployRegion(connection, secondSN, daughters.getSecond());
      // Simulate the pre-HBASE-7721 crash: remove the daughters from meta
      // and from the master's in-memory region states.
      List<Delete> deletes = new ArrayList<>();
      deletes.add(new Delete(daughters.getFirst().getRegionName()));
      deletes.add(new Delete(daughters.getSecond().getRegionName()));
      meta.delete(deletes);
      RegionStates regionStates =
          TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
      regionStates.deleteRegion(daughters.getFirst());
      regionStates.deleteRegion(daughters.getSecond());
      HBaseFsck hbck = doFsck(conf, false);
      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[]{
          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
      // A restricted fix run cannot repair this case; the errors persist.
      // NOTE(review): the positional doFsck flags' semantics are not visible here.
      hbck = doFsck(conf, true, true, false, false, false, false, false, false, false, false, false, null);
      assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[]{
          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
      // The parent row must still be present and intact in meta,
      // no rows are lost, and the key space now includes the split point.
      Get get = new Get(hri.getRegionName());
      Result result = meta.get(get);
      assertNotNull(result);
      assertNotNull(MetaTableAccessor.getHRegionInfo(result));
      assertEquals(ROWKEYS.length, countRows());
      assertEquals(rl.getStartKeys().length, SPLITS.length + 1 + 1);
      assertNoErrors(doFsck(conf, false));
    }
  } finally {
    admin.enableCatalogJanitor(true);
    meta.close();
    cleanupTable(table);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test -noHdfsChecking option can detect region is not in meta but deployed.
 * However, it can not fix it without checking Hdfs because we need to get
 * the region info from Hdfs in this case, then to patch the meta.
 */
@Test(timeout=180000)
public void testFixMetaNotWorkingWithNoHdfsChecking() throws Exception {
  TableName table=TableName.valueOf("testFixMetaNotWorkingWithNoHdfsChecking");
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length,countRows());
    // Remove the [A,B) region's meta entry while leaving it deployed.
    // NOTE(review): deleteRegion flag semantics are not visible in this file.
    deleteRegion(conf,tbl.getTableDescriptor(),Bytes.toBytes("A"),Bytes.toBytes("B"),
        false,true,false,false,HRegionInfo.DEFAULT_REPLICA_ID);
    // Full hbck sees NOT_IN_META plus the resulting chain hole.
    HBaseFsck hbck=doFsck(conf,false);
    assertErrors(hbck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META,
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    // The same problems are detectable with HDFS checking off.
    HBaseFsck fsck=new HBaseFsck(conf,hbfsckExecutorService);
    fsck.connect();
    HBaseFsck.setDisplayFullReport();
    fsck.setTimeLag(0);
    fsck.setCheckHdfs(false);
    fsck.onlineHbck();
    assertErrors(fsck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META,
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    fsck.close();
    // But -fixMeta cannot repair them without HDFS access: no rerun is
    // requested and the errors remain.
    fsck=new HBaseFsck(conf,hbfsckExecutorService);
    fsck.connect();
    HBaseFsck.setDisplayFullReport();
    fsck.setTimeLag(0);
    fsck.setCheckHdfs(false);
    fsck.setFixAssignments(true);
    fsck.setFixMeta(true);
    fsck.onlineHbck();
    assertFalse(fsck.shouldRerun());
    assertErrors(fsck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META,
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    fsck.close();
    // A full fix run (HDFS checking on) does repair the cluster.
    fsck=doFsck(conf,true);
    assertTrue(fsck.shouldRerun());
    fsck=doFsck(conf,true);
    assertNoErrors(fsck);
  } finally {
    cleanupTable(table);
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies hbck detects and deletes replication queues whose peer no longer
 * exists, while leaving queues of a live peer alone.
 */
@Test(timeout=180000)
public void testCheckReplication() throws Exception {
  // Baseline: a clean cluster reports no errors.
  HBaseFsck hbck = doFsck(conf, false);
  assertNoErrors(hbck);
  ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
  Assert.assertEquals(0, replicationAdmin.getPeersCount());
  int zkPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
  replicationAdmin.addPeer("1", "127.0.0.1:" + zkPort + ":/hbase");
  // (Dropped a stray no-op getPeersCount() call that preceded this assert.)
  Assert.assertEquals(1, replicationAdmin.getPeersCount());
  // Queues belonging to the existing peer "1" are legitimate -> still clean.
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test Hbase Fsck", connection);
  ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(zkw, conf, connection);
  repQueues.init("server1");
  repQueues.addLog("1", "file1");
  repQueues.addLog("1-server2", "file1");
  Assert.assertEquals(2, repQueues.getAllQueues().size());
  hbck = doFsck(conf, false);
  assertNoErrors(hbck);
  // Queues for the unknown peer "2" are undeleted leftovers -> two errors.
  repQueues.addLog("2", "file1");
  repQueues.addLog("2-server2", "file1");
  Assert.assertEquals(4, repQueues.getAllQueues().size());
  hbck = doFsck(conf, false);
  assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[]{
      HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE,
      HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE});
  // A fix run removes the bogus queues; peer "1" queues survive.
  hbck = doFsck(conf, true);
  hbck = doFsck(conf, false);
  assertNoErrors(hbck);
  Assert.assertEquals(2, repQueues.getAllQueues().size());
  Assert.assertNull(repQueues.getLogsInQueue("2"));
  // BUGFIX: was "2-sever2" (typo) — that queried a queue name that never
  // existed, so the assert passed vacuously instead of checking the cleanup.
  Assert.assertNull(repQueues.getLogsInQueue("2-server2"));
  replicationAdmin.removePeer("1");
  repQueues.removeAllQueues();
  zkw.close();
  replicationAdmin.close();
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A split parent in meta, in hdfs, and not deployed
 */
@Test(timeout=180000)
public void testLingeringSplitParent() throws Exception {
  TableName table=TableName.valueOf("testLingeringSplitParent");
  Table meta=null;
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length,countRows());
    admin.flush(table);
    HRegionLocation location;
    try (RegionLocator rl=connection.getRegionLocator(tbl.getName())){
      location=rl.getRegionLocation(Bytes.toBytes("B"));
    }
    // Remove the [B,C) region, then hand-write a split-parent meta entry for it
    // with two fabricated daughters [B,BM) and [BM,C).
    deleteRegion(conf,tbl.getTableDescriptor(),Bytes.toBytes("B"),Bytes.toBytes("C"),true,true,false);
    meta=connection.getTable(TableName.META_TABLE_NAME,tableExecutorService);
    HRegionInfo hri=location.getRegionInfo();
    HRegionInfo a=new HRegionInfo(tbl.getName(),Bytes.toBytes("B"),Bytes.toBytes("BM"));
    HRegionInfo b=new HRegionInfo(tbl.getName(),Bytes.toBytes("BM"),Bytes.toBytes("C"));
    hri.setOffline(true);
    hri.setSplit(true);
    MetaTableAccessor.addRegionToMeta(meta,hri,a,b);
    // NOTE(review): meta is closed here but meta.get(get) is called further
    // below — confirm the Table is still usable after close() or reopen it.
    meta.close();
    admin.flush(TableName.META_TABLE_NAME);
    // The lingering split parent is reported, and neither a plain fix run
    // nor a re-check clears it.
    HBaseFsck hbck=doFsck(conf,false);
    assertErrors(hbck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.LINGERING_SPLIT_PARENT,
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    hbck=doFsck(conf,true);
    assertErrors(hbck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.LINGERING_SPLIT_PARENT,
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    assertFalse(hbck.shouldRerun());
    hbck=doFsck(conf,false);
    assertErrors(hbck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.LINGERING_SPLIT_PARENT,
        HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN});
    // Only -fixSplitParents repairs it; a rerun is requested.
    hbck=new HBaseFsck(conf,hbfsckExecutorService);
    hbck.connect();
    HBaseFsck.setDisplayFullReport();
    hbck.setTimeLag(0);
    hbck.setFixSplitParents(true);
    hbck.onlineHbck();
    assertTrue(hbck.shouldRerun());
    hbck.close();
    // The SPLITA/SPLITB daughter pointers must have been cleared from the
    // parent's meta row, and the cluster ends up clean with no row loss.
    Get get=new Get(hri.getRegionName());
    Result result=meta.get(get);
    assertTrue(result.getColumnCells(HConstants.CATALOG_FAMILY,HConstants.SPLITA_QUALIFIER).isEmpty());
    assertTrue(result.getColumnCells(HConstants.CATALOG_FAMILY,HConstants.SPLITB_QUALIFIER).isEmpty());
    admin.flush(TableName.META_TABLE_NAME);
    doFsck(conf,true);
    assertNoErrors(doFsck(conf,false));
    assertEquals(ROWKEYS.length,countRows());
  } finally {
    cleanupTable(table);
    IOUtils.closeQuietly(meta);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Read-only hbck runs default to skipping the permission pre-check; fix runs
 * perform it unless the skip is explicitly requested.
 */
@Test(timeout=180000)
public void testReadOnlyProperty() throws Exception {
  // A read-only (no-fix) run ignores the permission pre-check by default.
  HBaseFsck readOnlyRun = doFsck(conf, false);
  Assert.assertEquals("shouldIgnorePreCheckPermission", true,
      readOnlyRun.shouldIgnorePreCheckPermission());
  // A fix run performs the pre-check by default.
  HBaseFsck fixRun = doFsck(conf, true);
  Assert.assertEquals("shouldIgnorePreCheckPermission", false,
      fixRun.shouldIgnorePreCheckPermission());
  // ...but the pre-check can still be switched off explicitly.
  HBaseFsck overriddenFixRun = doFsck(conf, true);
  overriddenFixRun.setIgnorePreCheckPermission(true);
  Assert.assertEquals("shouldIgnorePreCheckPermission", true,
      overriddenFixRun.shouldIgnorePreCheckPermission());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Hides, then deletes, a table's .tableinfo file and verifies hbck -fix
 * restores it — including a customized (non-default) table descriptor.
 */
@Test(timeout=180000)
public void testHbckFixOrphanTable() throws Exception {
  TableName table = TableName.valueOf("tableInfo");
  FileSystem fs = null;
  Path tableinfo = null;
  try {
    setupTable(table);
    Path hbaseTableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), table);
    fs = hbaseTableDir.getFileSystem(conf);
    // Hide the .tableinfo file so hbck sees an orphan table.
    FileStatus status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    tableinfo = status.getPath();
    fs.rename(tableinfo, new Path("/.tableinfo"));
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.NO_TABLEINFO_FILE});
    // A fix run restores a .tableinfo file.
    hbck = doFsck(conf, true);
    assertNoErrors(hbck);
    // (Dropped a redundant "status = null;" that immediately preceded this.)
    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    assertNotNull(status);
    // Customize the descriptor, delete the file outright, and verify the fix
    // restores the customized descriptor rather than a default one.
    HTableDescriptor htd = admin.getTableDescriptor(table);
    htd.setValue("NOT_DEFAULT", "true");
    admin.disableTable(table);
    admin.modifyTable(table, htd);
    admin.enableTable(table);
    fs.delete(status.getPath(), true);
    htd = admin.getTableDescriptor(table);
    hbck = doFsck(conf, true);
    assertNoErrors(hbck);
    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    assertNotNull(status);
    htd = admin.getTableDescriptor(table);
    // Expected value first (the original had the assertEquals args reversed).
    assertEquals("true", htd.getValue("NOT_DEFAULT"));
  } finally {
    if (fs != null) {
      fs.rename(new Path("/.tableinfo"), tableinfo);
    }
    cleanupTable(table);
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that LINGERING_SPLIT_PARENT is not erroneously reported for
 * valid cases where the daughters are there.
 */
@Test(timeout=180000)
public void testValidLingeringSplitParent() throws Exception {
  TableName table=TableName.valueOf("testLingeringSplitParent");
  Table meta=null;
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length,countRows());
    admin.flush(table);
    try (RegionLocator rl=connection.getRegionLocator(tbl.getName())){
      // Perform a real split at "BM" and wait for it to finish, leaving a
      // legitimate split parent whose daughters both exist.
      HRegionLocation location=rl.getRegionLocation(Bytes.toBytes("B"));
      meta=connection.getTable(TableName.META_TABLE_NAME,tableExecutorService);
      HRegionInfo hri=location.getRegionInfo();
      byte[] regionName=location.getRegionInfo().getRegionName();
      admin.splitRegion(location.getRegionInfo().getRegionName(),Bytes.toBytes("BM"));
      TestEndToEndSplitTransaction.blockUntilRegionSplit(conf,60000,regionName,true);
      // Run hbck with fix options; no errors may be reported for a valid split.
      // NOTE(review): the positional doFsck flags' semantics are not visible here.
      HBaseFsck hbck=doFsck(conf,true,true,false,false,false,true,true,true,false,false,false,null);
      assertErrors(hbck,new HBaseFsck.ErrorReporter.ERROR_CODE[]{});
      // The parent row is still present and intact in meta, no rows were
      // lost, and the key space now includes the new split point.
      Get get=new Get(hri.getRegionName());
      Result result=meta.get(get);
      assertNotNull(result);
      assertNotNull(MetaTableAccessor.getHRegionInfo(result));
      assertEquals(ROWKEYS.length,countRows());
      assertEquals(rl.getStartKeys().length,SPLITS.length + 1 + 1);
      assertNoErrors(doFsck(conf,false));
    }
  } finally {
    cleanupTable(table);
    IOUtils.closeQuietly(meta);
  }
}

Class: org.apache.hadoop.hbase.util.TestHBaseFsckTwoRS

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * This creates and fixes a bad table where an overlap group of
 * 3 regions. Set HBaseFsck.maxMerge to 2 to trigger sideline overlapped
 * region. Mess around the meta data so that closeRegion/offlineRegion
 * throws exceptions.
 */
@Test(timeout=180000)
public void testSidelineOverlapRegion() throws Exception {
  TableName table = TableName.valueOf("testSidelineOverlapRegion");
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    // Create two extra regions overlapping the existing [A,B) region.
    HRegionInfo hriOverlap1 = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("AB"));
    master.assignRegion(hriOverlap1);
    master.getAssignmentManager().waitForAssignment(hriOverlap1);
    HRegionInfo hriOverlap2 = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("AB"), Bytes.toBytes("B"));
    master.assignRegion(hriOverlap2);
    master.getAssignmentManager().waitForAssignment(hriOverlap2);
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[]{
        HBaseFsck.ErrorReporter.ERROR_CODE.DUPE_STARTKEYS,
        HBaseFsck.ErrorReporter.ERROR_CODE.DUPE_STARTKEYS,
        HBaseFsck.ErrorReporter.ERROR_CODE.OVERLAP_IN_REGION_CHAIN});
    assertEquals(3, hbck.getOverlapGroups(table).size());
    assertEquals(ROWKEYS.length, countRows());
    // Typed multimap (was raw, so values() yielded Object and the typed
    // for-each below did not compile).
    Multimap<byte[], HBaseFsck.HbckInfo> overlapGroups = hbck.getOverlapGroups(table);
    ServerName serverName = null;
    byte[] regionName = null;
    // Find the [A,B) member of the overlap group, close it on its own server,
    // and remember the name of a DIFFERENT server to poison meta with below.
    for (HBaseFsck.HbckInfo hbi : overlapGroups.values()) {
      if ("A".equals(Bytes.toString(hbi.getStartKey()))
          && "B".equals(Bytes.toString(hbi.getEndKey()))) {
        regionName = hbi.getRegionName();
        int k = cluster.getServerWith(regionName);
        for (int i = 0; i < 3; i++) {
          if (i != k) {
            HRegionServer rs = cluster.getRegionServer(i);
            serverName = rs.getServerName();
            break;
          }
        }
        HBaseFsckRepair.closeRegionSilentlyAndWait((HConnection) connection,
            cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI());
        admin.offline(regionName);
        break;
      }
    }
    assertNotNull(regionName);
    assertNotNull(serverName);
    // Point the region's meta row at the wrong server so closeRegion /
    // offlineRegion will throw during the repair.
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) {
      Put put = new Put(regionName);
      put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
          Bytes.toBytes(serverName.getHostAndPort()));
      meta.put(put);
    }
    // Fix with maxMerge=2 so the 3-region overlap group triggers sidelining.
    HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService);
    fsck.connect();
    HBaseFsck.setDisplayFullReport();
    fsck.setTimeLag(0);
    fsck.setFixAssignments(true);
    fsck.setFixMeta(true);
    fsck.setFixHdfsHoles(true);
    fsck.setFixHdfsOverlaps(true);
    fsck.setFixHdfsOrphans(true);
    fsck.setFixVersionFile(true);
    fsck.setSidelineBigOverlaps(true);
    fsck.setMaxMerge(2);
    fsck.onlineHbck();
    fsck.close();
    // The cluster is clean afterwards; the sidelined region's rows are gone,
    // so strictly fewer rows remain than were originally written.
    HBaseFsck hbck2 = doFsck(conf, false);
    assertNoErrors(hbck2);
    assertEquals(0, hbck2.getOverlapGroups(table).size());
    assertTrue(ROWKEYS.length > countRows());
  } finally {
    cleanupTable(table);
  }
}

Class: org.apache.hadoop.hbase.util.TestIdReadWriteLock

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs NUM_THREADS concurrent lock clients, requires every client to succeed,
 * and then verifies the lock entry pool is fully purged (no leaked entries).
 */
@Test(timeout=60000)
public void testMultipleClients() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
  try {
    // Typed completion service / future (were raw types; a raw Future's get()
    // returns Object, which does not satisfy assertTrue(boolean)).
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
    for (int i = 0; i < NUM_THREADS; ++i) {
      ecs.submit(new IdLockTestThread("client_" + i));
    }
    // Each client must report success; get() also propagates client exceptions.
    for (int i = 0; i < NUM_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
    }
    int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
    LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
    assertEquals(0, entryPoolSize);
  } finally {
    exec.shutdown();
    exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
  }
}

Class: org.apache.hadoop.hbase.util.TestIncrementingEnvironmentEdge

InternalCallVerifier EqualityVerifier 
/**
 * An IncrementingEnvironmentEdge seeded with 1 must hand back 1, 2, 3, 4
 * on four consecutive currentTime() calls.
 */
@Test
public void testGetCurrentTimeUsesSystemClock() {
  IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge(1);
  // Each call returns the next value in sequence, starting at the seed.
  for (long expected = 1; expected <= 4; expected++) {
    assertEquals(expected, edge.currentTime());
  }
}

Class: org.apache.hadoop.hbase.util.TestKeyLocker

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test
public void testLocker(){
  KeyLocker locker=new KeyLocker();
  // Acquiring distinct keys yields distinct, held locks.
  ReentrantLock lock1=locker.acquireLock("l1");
  Assert.assertTrue(lock1.isHeldByCurrentThread());
  ReentrantLock lock2=locker.acquireLock("l2");
  Assert.assertTrue(lock2.isHeldByCurrentThread());
  Assert.assertTrue(lock1 != lock2);
  // Re-acquiring the same key returns the very same (reentrant) lock object;
  // one unlock leaves it held, the second releases it.
  ReentrantLock lock20=locker.acquireLock("l2");
  Assert.assertTrue(lock20 == lock2);
  Assert.assertTrue(lock2.isHeldByCurrentThread());
  Assert.assertTrue(lock20.isHeldByCurrentThread());
  lock20.unlock();
  Assert.assertTrue(lock20.isHeldByCurrentThread());
  lock2.unlock();
  Assert.assertFalse(lock20.isHeldByCurrentThread());
  // Drop all strong references and GC; a fresh acquire of "l2" should then
  // produce a brand-new lock object (identity hash differs).
  // NOTE(review): this relies on System.gc() actually collecting the old
  // entry, which the JVM does not guarantee — potentially flaky.
  int lock2Hash=System.identityHashCode(lock2);
  lock2=null;
  lock20=null;
  System.gc();
  System.gc();
  System.gc();
  ReentrantLock lock200=locker.acquireLock("l2");
  Assert.assertNotEquals(lock2Hash,System.identityHashCode(lock200));
  lock200.unlock();
  Assert.assertFalse(lock200.isHeldByCurrentThread());
  // The unrelated "l1" lock was unaffected throughout.
  Assert.assertTrue(lock1.isHeldByCurrentThread());
  lock1.unlock();
  Assert.assertFalse(lock1.isHeldByCurrentThread());
}

Class: org.apache.hadoop.hbase.util.TestLoadTestKVGenerator

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * For many generated (row, qualifier) pairs the generated value must verify,
 * and corrupting a single byte of it must make verification fail.
 */
@Test
public void testVerification() {
  for (int keyIndex = 0; keyIndex < 1000; ++keyIndex) {
    byte[] row = LoadTestKVGenerator.md5PrefixedKey(keyIndex).getBytes();
    for (int qualIndex = 0; qualIndex < 20; ++qualIndex) {
      byte[] qualifier = String.valueOf(qualIndex).getBytes();
      byte[] value = gen.generateRandomSizeValue(row, qualifier);
      assertTrue(LoadTestKVGenerator.verify(value, row, qualifier));
      value[0]++; // flip one byte to corrupt the value
      assertFalse(LoadTestKVGenerator.verify(value, row, qualifier));
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Every generated value's length must fall within [MIN_LEN, MAX_LEN].
 */
@Test
public void testValueLength() {
  for (int i = 0; i < 1000; ++i) {
    byte[] row = Integer.toString(i).getBytes();
    byte[] qualifier = String.valueOf(rand.nextInt()).getBytes();
    byte[] value = gen.generateRandomSizeValue(row, qualifier);
    assertTrue(MIN_LEN <= value.length);
    assertTrue(value.length <= MAX_LEN);
  }
}

Class: org.apache.hadoop.hbase.util.TestMergeTable

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Test merge.
 * Hand-makes regions of a mergeable size and adds the hand-made regions to
 * hand-made meta. The hand-made regions are created offline. We then start
 * up mini cluster, disables the hand-made table and starts in on merging.
 * @throws Exception
 */
@Test(timeout=300000)
public void testMergeTable() throws Exception {
  // Large max file size and a split limit of 0 keep the hand-made regions
  // from being split while the cluster is up.
  HTableDescriptor desc=new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf(Bytes.toBytes("test")));
  desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
  UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE,64L * 1024L * 1024L);
  UTIL.getConfiguration().setInt("hbase.regionserver.regionSplitLimit",0);
  UTIL.startMiniDFSCluster(1);
  Path rootdir=UTIL.createRootDir();
  FileSystem fs=FileSystem.get(UTIL.getConfiguration());
  if (fs.exists(rootdir)) {
    if (fs.delete(rootdir,true)) {
      LOG.info("Cleaned up existing " + rootdir);
    }
  }
  // Build three adjacent offline regions (split at row_70001 and row_80001)
  // and register them in a hand-made meta.
  byte[] row_70001=Bytes.toBytes("row_70001");
  byte[] row_80001=Bytes.toBytes("row_80001");
  new FSTableDescriptors(UTIL.getConfiguration(),fs,rootdir).createTableDescriptor(desc);
  HRegion[] regions={
      createRegion(desc,null,row_70001,1,70000,rootdir),
      createRegion(desc,row_70001,row_80001,70001,10000,rootdir),
      createRegion(desc,row_80001,null,80001,11000,rootdir)};
  setupMeta(rootdir,regions);
  try {
    LOG.info("Starting mini zk cluster");
    UTIL.startMiniZKCluster();
    LOG.info("Starting mini hbase cluster");
    UTIL.startMiniHBaseCluster(1,1);
    Configuration c=new Configuration(UTIL.getConfiguration());
    Connection connection=UTIL.getConnection();
    List originalTableRegions=MetaTableAccessor.getTableRegions(connection,desc.getTableName());
    LOG.info("originalTableRegions size=" + originalTableRegions.size() + "; "+ originalTableRegions);
    // The table must be disabled before HMerge will operate on it.
    Admin admin=connection.getAdmin();
    admin.disableTable(desc.getTableName());
    admin.close();
    HMerge.merge(c,FileSystem.get(c),desc.getTableName());
    // After merging, the table must have strictly fewer regions.
    List postMergeTableRegions=MetaTableAccessor.getTableRegions(connection,desc.getTableName());
    LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() + "; "+ postMergeTableRegions);
    assertTrue("originalTableRegions=" + originalTableRegions.size()
        + ", postMergeTableRegions="+ postMergeTableRegions.size(),
        postMergeTableRegions.size() < originalTableRegions.size());
    LOG.info("Done with merge");
  } finally {
    UTIL.shutdownMiniCluster();
    LOG.info("After cluster shutdown");
  }
}

Class: org.apache.hadoop.hbase.util.TestMergeTool

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier 
/**
 * Test merge tool.
 * @throws Exception
 */
@Test
public void testMergeTool() throws Exception {
  // Sanity pass: every seeded row in every source region must read back a
  // value equal to its own row key; then close each region cleanly.
  for (int i=0; i < regions.length; i++) {
    for (int j=0; j < rows[i].length; j++) {
      Get get=new Get(rows[i][j]);
      get.addFamily(FAMILY);
      Result result=regions[i].get(get);
      byte[] bytes=CellUtil.cloneValue(result.rawCells()[0]);
      assertNotNull(bytes);
      assertTrue(Bytes.equals(bytes,rows[i][j]));
    }
    HBaseTestingUtility.closeRegionAndWAL(regions[i]);
  }
  WAL log=wals.getWAL(new byte[]{},null);
  // Fold the five source regions together one at a time, verifying the
  // cumulative row count after each merge (2, 3, 4, then all rows).
  HRegion merged=mergeAndVerify("merging regions 0 and 1 ",this.sourceRegions[0].getRegionNameAsString(),this.sourceRegions[1].getRegionNameAsString(),log,2);
  merged=mergeAndVerify("merging regions 0+1 and 2",merged.getRegionInfo().getRegionNameAsString(),this.sourceRegions[2].getRegionNameAsString(),log,3);
  merged=mergeAndVerify("merging regions 0+1+2 and 3",merged.getRegionInfo().getRegionNameAsString(),this.sourceRegions[3].getRegionNameAsString(),log,4);
  merged=mergeAndVerify("merging regions 0+1+2+3 and 4",merged.getRegionInfo().getRegionNameAsString(),this.sourceRegions[4].getRegionNameAsString(),log,rows.length);
}

Class: org.apache.hadoop.hbase.util.TestOrderedBytes

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Test int8 encoding.
 */
@Test
public void testInt8(){
  Byte[] vals={Byte.MIN_VALUE,Byte.MIN_VALUE / 2,0,Byte.MAX_VALUE / 2,Byte.MAX_VALUE};
  // Round-trip each value in both sort orders. The buffer has a one-byte
  // guard on each side (and one extra at the end) to detect over/underflow.
  for ( Order ord : new Order[]{Order.ASCENDING,Order.DESCENDING}) {
    for (int i=0; i < vals.length; i++) {
      byte[] a=new byte[2 + 3];
      PositionedByteRange buf1=new SimplePositionedMutableByteRange(a,1,2 + 1);
      buf1.setPosition(1);
      // Encode: 2 bytes written, entire range consumed, guards untouched.
      assertEquals("Surprising return value.",2,OrderedBytes.encodeInt8(buf1,vals[i],ord));
      assertEquals("Broken test: serialization did not consume entire buffer.",buf1.getLength(),buf1.getPosition());
      assertEquals("Surprising serialized length.",2,buf1.getPosition() - 1);
      assertEquals("Buffer underflow.",0,a[0]);
      assertEquals("Buffer underflow.",0,a[1]);
      assertEquals("Buffer overflow.",0,a[a.length - 1]);
      // skip() must advance over exactly the encoded width.
      buf1.setPosition(1);
      assertEquals("Surprising return value.",2,OrderedBytes.skip(buf1));
      assertEquals("Did not skip enough bytes.",2,buf1.getPosition() - 1);
      // Decode must reproduce the original value and consume the same width.
      buf1.setPosition(1);
      assertEquals("Deserialization failed.",vals[i].byteValue(),OrderedBytes.decodeInt8(buf1));
      assertEquals("Did not consume enough bytes.",2,buf1.getPosition() - 1);
    }
  }
  // Byte-wise ordering of the encodings must match the values' natural order
  // (reversed for DESCENDING).
  for ( Order ord : new Order[]{Order.ASCENDING,Order.DESCENDING}) {
    byte[][] encoded=new byte[vals.length][2];
    PositionedByteRange pbr=new SimplePositionedMutableByteRange();
    for (int i=0; i < vals.length; i++) {
      OrderedBytes.encodeInt8(pbr.set(encoded[i]),vals[i],ord);
    }
    Arrays.sort(encoded,Bytes.BYTES_COMPARATOR);
    Byte[] sortedVals=Arrays.copyOf(vals,vals.length);
    if (ord == Order.ASCENDING) Arrays.sort(sortedVals);
    else Arrays.sort(sortedVals,Collections.reverseOrder());
    for (int i=0; i < sortedVals.length; i++) {
      int decoded=OrderedBytes.decodeInt8(pbr.set(encoded[i]));
      assertEquals(String.format("Encoded representations do not preserve natural order: <%s>, <%s>, %s",sortedVals[i],decoded,ord),sortedVals[i].byteValue(),decoded);
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Test int32 encoding.
 */
@Test
public void testInt32(){
  Integer[] vals={Integer.MIN_VALUE,Integer.MIN_VALUE / 2,0,Integer.MAX_VALUE / 2,Integer.MAX_VALUE};
  // Round-trip each value in both sort orders. The buffer has a one-byte
  // guard on each side (and one extra at the end) to detect over/underflow.
  for ( Order ord : new Order[]{Order.ASCENDING,Order.DESCENDING}) {
    for (int i=0; i < vals.length; i++) {
      byte[] a=new byte[5 + 3];
      PositionedByteRange buf1=new SimplePositionedMutableByteRange(a,1,5 + 1);
      buf1.setPosition(1);
      // Encode: 5 bytes written, entire range consumed, guards untouched.
      assertEquals("Surprising return value.",5,OrderedBytes.encodeInt32(buf1,vals[i],ord));
      assertEquals("Broken test: serialization did not consume entire buffer.",buf1.getLength(),buf1.getPosition());
      assertEquals("Surprising serialized length.",5,buf1.getPosition() - 1);
      assertEquals("Buffer underflow.",0,a[0]);
      assertEquals("Buffer underflow.",0,a[1]);
      assertEquals("Buffer overflow.",0,a[a.length - 1]);
      // skip() must advance over exactly the encoded width.
      buf1.setPosition(1);
      assertEquals("Surprising return value.",5,OrderedBytes.skip(buf1));
      assertEquals("Did not skip enough bytes.",5,buf1.getPosition() - 1);
      // Decode must reproduce the original value and consume the same width.
      buf1.setPosition(1);
      assertEquals("Deserialization failed.",vals[i].intValue(),OrderedBytes.decodeInt32(buf1));
      assertEquals("Did not consume enough bytes.",5,buf1.getPosition() - 1);
    }
  }
  // Byte-wise ordering of the encodings must match the values' natural order
  // (reversed for DESCENDING).
  for ( Order ord : new Order[]{Order.ASCENDING,Order.DESCENDING}) {
    byte[][] encoded=new byte[vals.length][5];
    PositionedByteRange pbr=new SimplePositionedMutableByteRange();
    for (int i=0; i < vals.length; i++) {
      OrderedBytes.encodeInt32(pbr.set(encoded[i]),vals[i],ord);
    }
    Arrays.sort(encoded,Bytes.BYTES_COMPARATOR);
    Integer[] sortedVals=Arrays.copyOf(vals,vals.length);
    if (ord == Order.ASCENDING) Arrays.sort(sortedVals);
    else Arrays.sort(sortedVals,Collections.reverseOrder());
    for (int i=0; i < sortedVals.length; i++) {
      int decoded=OrderedBytes.decodeInt32(pbr.set(encoded[i]));
      assertEquals(String.format("Encoded representations do not preserve natural order: <%s>, <%s>, %s",sortedVals[i],decoded,ord),sortedVals[i].intValue(),decoded);
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Test BlobCopy encoding.
 * <p>
 * Round-trips each value (empty, ASCII, and 0xaa/0x55 bit-pattern blobs)
 * through encode/skip/decode in an offset buffer bracketed by zero guard
 * bytes, verifies the encoded byte[]s sort in the values' natural (or
 * reversed) order, and exercises the (byte[], offset, length) encode
 * overload. NOTE(review): the expected length is val.length + 1 for
 * ASCENDING and + 2 for DESCENDING — presumably a DESCENDING terminator
 * byte; confirm against OrderedBytes.encodeBlobCopy.
 */
@Test
public void testBlobCopy() {
  byte[][] vals = { "".getBytes(), "foo".getBytes(), "foobarbazbub".getBytes(),
    { (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa,
      (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa },
    { (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55,
      (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55 } };
  // Round trip: encode at offset 1 with zero guard bytes on either side.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (byte[] val : vals) {
      int expectedLen = val.length + (Order.ASCENDING == ord ? 1 : 2);
      byte[] a = new byte[expectedLen + 3];
      PositionedByteRange buf1 = new SimplePositionedMutableByteRange(a, 1, expectedLen + 1);
      buf1.setPosition(1);
      assertEquals("Surprising return value.", expectedLen,
        OrderedBytes.encodeBlobCopy(buf1, val, ord));
      assertEquals("Broken test: serialization did not consume entire buffer.",
        buf1.getLength(), buf1.getPosition());
      assertEquals("Surprising serialized length.", expectedLen, buf1.getPosition() - 1);
      assertEquals("Buffer underflow.", 0, a[0]);
      assertEquals("Buffer underflow.", 0, a[1]);
      assertEquals("Buffer overflow.", 0, a[a.length - 1]);
      buf1.setPosition(1);
      assertEquals("Surprising return value.", expectedLen, OrderedBytes.skip(buf1));
      assertEquals("Did not skip enough bytes.", expectedLen, buf1.getPosition() - 1);
      buf1.setPosition(1);
      assertArrayEquals("Deserialization failed.", val, OrderedBytes.decodeBlobCopy(buf1));
      assertEquals("Did not consume enough bytes.", expectedLen, buf1.getPosition() - 1);
    }
  }
  // Sort order: lexicographic order of encodings must match value order.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    byte[][] encoded = new byte[vals.length][];
    PositionedByteRange pbr = new SimplePositionedMutableByteRange();
    for (int i = 0; i < vals.length; i++) {
      encoded[i] = new byte[vals[i].length + (Order.ASCENDING == ord ? 1 : 2)];
      OrderedBytes.encodeBlobCopy(pbr.set(encoded[i]), vals[i], ord);
    }
    Arrays.sort(encoded, Bytes.BYTES_COMPARATOR);
    byte[][] sortedVals = Arrays.copyOf(vals, vals.length);
    if (ord == Order.ASCENDING) Arrays.sort(sortedVals, Bytes.BYTES_COMPARATOR);
    else Arrays.sort(sortedVals, Collections.reverseOrder(Bytes.BYTES_COMPARATOR));
    for (int i = 0; i < sortedVals.length; i++) {
      pbr.set(encoded[i]);
      byte[] decoded = OrderedBytes.decodeBlobCopy(pbr);
      assertArrayEquals(String.format(
        "Encoded representations do not preserve natural order: <%s>, <%s>, %s",
        Arrays.toString(sortedVals[i]), Arrays.toString(decoded), ord), sortedVals[i], decoded);
    }
  }
  // Partial encode: encode bytes [3, 6) of "foobarbaz", expect "bar" back.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    byte[] a = new byte[3 + (Order.ASCENDING == ord ? 1 : 2) + 2];
    PositionedByteRange buf =
      new SimplePositionedMutableByteRange(a, 1, 3 + (Order.ASCENDING == ord ? 1 : 2));
    OrderedBytes.encodeBlobCopy(buf, "foobarbaz".getBytes(), 3, 3, ord);
    buf.setPosition(0);
    assertArrayEquals("bar".getBytes(), OrderedBytes.decodeBlobCopy(buf));
  }
}

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests the variable uint64 encoding at every encoded-length boundary.
 * <p>
 * Building sqlite4 with -DVARINT_TOOL provides this reference:
 * $ ./varint_tool 240 2287 67823 16777215 4294967295 1099511627775
 *     281474976710655 72057594037927935 18446744073709551615
 * 240 = f0
 * 2287 = f8ff
 * 67823 = f9ffff
 * 16777215 = faffffff
 * 4294967295 = fbffffffff
 * 1099511627775 = fcffffffffff
 * 281474976710655 = fdffffffffffff
 * 72057594037927935 = feffffffffffffff
 * 9223372036854775807 = ff7fffffffffffffff (Long.MAX_VAL)
 * 9223372036854775808 = ff8000000000000000 (Long.MIN_VAL)
 * 18446744073709551615 = ffffffffffffffffff
 */
@Test
public void testVaruint64Boundaries() {
  // Pairs of values straddling each length boundary plus the extremes;
  // lens[i] is the expected encoded length of vals[i].
  long vals[] = { 239L, 240L, 2286L, 2287L, 67822L, 67823L, 16777214L, 16777215L, 4294967294L,
    4294967295L, 1099511627774L, 1099511627775L, 281474976710654L, 281474976710655L,
    72057594037927934L, 72057594037927935L, Long.MAX_VALUE - 1, Long.MAX_VALUE,
    Long.MIN_VALUE + 1, Long.MIN_VALUE, -2L, -1L };
  int lens[] = { 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9 };
  assertEquals("Broken test!", vals.length, lens.length);
  // Exercise both the complemented and non-complemented encodings.
  for (boolean comp : new boolean[] { true, false }) {
    for (int i = 0; i < vals.length; i++) {
      // Buffer starts at offset 1 with a zero guard byte on each side.
      byte[] a = new byte[lens[i] + 2];
      PositionedByteRange buf = new SimplePositionedMutableByteRange(a, 1, lens[i]);
      assertEquals("Surprising return value.", lens[i],
        OrderedBytes.putVaruint64(buf, vals[i], comp));
      assertEquals("Surprising serialized length.", lens[i], buf.getPosition());
      assertEquals("Buffer underflow.", 0, a[0]);
      assertEquals("Buffer overflow.", 0, a[a.length - 1]);
      buf.setPosition(0);
      assertEquals("Surprising return value.", lens[i], OrderedBytes.skipVaruint64(buf, comp));
      assertEquals("Did not skip enough bytes.", lens[i], buf.getPosition());
      buf.setPosition(0);
      assertEquals("Deserialization failed.", vals[i], OrderedBytes.getVaruint64(buf, comp));
      assertEquals("Did not consume enough bytes.", lens[i], buf.getPosition());
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/** * Test string encoding. */ @Test public void testString(){ String[] vals={"foo","baaaar","bazz"}; int expectedLengths[]={5,8,6}; for ( Order ord : new Order[]{Order.ASCENDING,Order.DESCENDING}) { for (int i=0; i < vals.length; i++) { byte[] a=new byte[expectedLengths[i] + 3]; PositionedByteRange buf1=new SimplePositionedMutableByteRange(a,1,expectedLengths[i] + 1); buf1.setPosition(1); assertEquals("Surprising return value.",expectedLengths[i],OrderedBytes.encodeString(buf1,vals[i],ord)); assertEquals("Broken test: serialization did not consume entire buffer.",buf1.getLength(),buf1.getPosition()); assertEquals("Surprising serialized length.",expectedLengths[i],buf1.getPosition() - 1); assertEquals("Buffer underflow.",0,a[0]); assertEquals("Buffer underflow.",0,a[1]); assertEquals("Buffer overflow.",0,a[a.length - 1]); buf1.setPosition(1); assertEquals("Surprising return value.",expectedLengths[i],OrderedBytes.skip(buf1)); assertEquals("Did not skip enough bytes.",expectedLengths[i],buf1.getPosition() - 1); buf1.setPosition(1); assertEquals("Deserialization failed.",vals[i],OrderedBytes.decodeString(buf1)); assertEquals("Did not consume enough bytes.",expectedLengths[i],buf1.getPosition() - 1); } } for ( Order ord : new Order[]{Order.ASCENDING,Order.DESCENDING}) { byte[][] encoded=new byte[vals.length][]; PositionedByteRange pbr=new SimplePositionedMutableByteRange(); for (int i=0; i < vals.length; i++) { encoded[i]=new byte[expectedLengths[i]]; OrderedBytes.encodeString(pbr.set(encoded[i]),vals[i],ord); } Arrays.sort(encoded,Bytes.BYTES_COMPARATOR); String[] sortedVals=Arrays.copyOf(vals,vals.length); if (ord == Order.ASCENDING) Arrays.sort(sortedVals); else Arrays.sort(sortedVals,Collections.reverseOrder()); for (int i=0; i < sortedVals.length; i++) { pbr.set(encoded[i]); String decoded=OrderedBytes.decodeString(pbr); assertEquals(String.format("Encoded representations do not preserve natural order: <%s>, <%s>, 
%s",sortedVals[i],decoded,ord),sortedVals[i],decoded); } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Round-trip and sort-order checks for int16 OrderedBytes encoding.
 * A 3-byte encoding is written at offset 1 with zero guard bytes, skipped,
 * and decoded; then lexicographic order of the encodings is checked against
 * the values' natural (or reversed) order.
 */
@Test
public void testInt16() {
  Short[] vals = { Short.MIN_VALUE, Short.MIN_VALUE / 2, 0, Short.MAX_VALUE / 2, Short.MAX_VALUE };

  // Encode/skip/decode round trip at a non-zero offset with guard bytes.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (Short val : vals) {
      byte[] backing = new byte[3 + 3];
      PositionedByteRange enc = new SimplePositionedMutableByteRange(backing, 1, 3 + 1);
      enc.setPosition(1);
      assertEquals("Surprising return value.", 3, OrderedBytes.encodeInt16(enc, val, ord));
      assertEquals("Broken test: serialization did not consume entire buffer.",
        enc.getLength(), enc.getPosition());
      assertEquals("Surprising serialized length.", 3, enc.getPosition() - 1);
      assertEquals("Buffer underflow.", 0, backing[0]);
      assertEquals("Buffer underflow.", 0, backing[1]);
      assertEquals("Buffer overflow.", 0, backing[backing.length - 1]);
      enc.setPosition(1);
      assertEquals("Surprising return value.", 3, OrderedBytes.skip(enc));
      assertEquals("Did not skip enough bytes.", 3, enc.getPosition() - 1);
      enc.setPosition(1);
      assertEquals("Deserialization failed.", val.shortValue(), OrderedBytes.decodeInt16(enc));
      assertEquals("Did not consume enough bytes.", 3, enc.getPosition() - 1);
    }
  }

  // Encoded byte order must match the values' natural (or reversed) order.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    byte[][] encoded = new byte[vals.length][3];
    PositionedByteRange pbr = new SimplePositionedMutableByteRange();
    for (int i = 0; i < vals.length; i++) {
      OrderedBytes.encodeInt16(pbr.set(encoded[i]), vals[i], ord);
    }
    Arrays.sort(encoded, Bytes.BYTES_COMPARATOR);
    Short[] expectedOrder = Arrays.copyOf(vals, vals.length);
    if (ord == Order.ASCENDING) {
      Arrays.sort(expectedOrder);
    } else {
      Arrays.sort(expectedOrder, Collections.reverseOrder());
    }
    for (int i = 0; i < expectedOrder.length; i++) {
      int decoded = OrderedBytes.decodeInt16(pbr.set(encoded[i]));
      assertEquals(String.format(
        "Encoded representations do not preserve natural order: <%s>, <%s>, %s",
        expectedOrder[i], decoded, ord), expectedOrder[i].shortValue(), decoded);
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Round-trip and sort-order checks for int64 OrderedBytes encoding.
 * A 9-byte encoding is written at offset 1 with zero guard bytes, skipped,
 * and decoded; then lexicographic order of the encodings is checked against
 * the values' natural (or reversed) order.
 */
@Test
public void testInt64() {
  Long[] vals = { Long.MIN_VALUE, Long.MIN_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE };

  // Encode/skip/decode round trip at a non-zero offset with guard bytes.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (Long val : vals) {
      byte[] backing = new byte[9 + 3];
      PositionedByteRange enc = new SimplePositionedMutableByteRange(backing, 1, 9 + 1);
      enc.setPosition(1);
      assertEquals("Surprising return value.", 9, OrderedBytes.encodeInt64(enc, val, ord));
      assertEquals("Broken test: serialization did not consume entire buffer.",
        enc.getLength(), enc.getPosition());
      assertEquals("Surprising serialized length.", 9, enc.getPosition() - 1);
      assertEquals("Buffer underflow.", 0, backing[0]);
      assertEquals("Buffer underflow.", 0, backing[1]);
      assertEquals("Buffer overflow.", 0, backing[backing.length - 1]);
      enc.setPosition(1);
      assertEquals("Surprising return value.", 9, OrderedBytes.skip(enc));
      assertEquals("Did not skip enough bytes.", 9, enc.getPosition() - 1);
      enc.setPosition(1);
      assertEquals("Deserialization failed.", val.longValue(), OrderedBytes.decodeInt64(enc));
      assertEquals("Did not consume enough bytes.", 9, enc.getPosition() - 1);
    }
  }

  // Encoded byte order must match the values' natural (or reversed) order.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    byte[][] encoded = new byte[vals.length][9];
    PositionedByteRange pbr = new SimplePositionedMutableByteRange();
    for (int i = 0; i < vals.length; i++) {
      OrderedBytes.encodeInt64(pbr.set(encoded[i]), vals[i], ord);
    }
    Arrays.sort(encoded, Bytes.BYTES_COMPARATOR);
    Long[] expectedOrder = Arrays.copyOf(vals, vals.length);
    if (ord == Order.ASCENDING) {
      Arrays.sort(expectedOrder);
    } else {
      Arrays.sort(expectedOrder, Collections.reverseOrder());
    }
    for (int i = 0; i < expectedOrder.length; i++) {
      long decoded = OrderedBytes.decodeInt64(pbr.set(encoded[i]));
      assertEquals(String.format(
        "Encoded representations do not preserve natural order: <%s>, <%s>, %s",
        expectedOrder[i], decoded, ord), expectedOrder[i].longValue(), decoded);
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Test integer Numeric encoding. Example input values come from the reference
 * wiki page. Round-trips each I_VALS entry (expected length I_LENGTHS) through
 * encodeNumeric/skip/decodeNumericAsLong in an offset buffer with zero guard
 * bytes, then verifies the encoded byte[]s sort in the values' natural (or
 * reversed) order.
 */
@Test
public void testNumericInt() {
  // Round trip: encode at offset 1 with guard bytes on either side.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (int i = 0; i < I_VALS.length; i++) {
      byte[] a = new byte[I_LENGTHS[i] + 3];
      PositionedByteRange buf1 = new SimplePositionedMutableByteRange(a, 1, I_LENGTHS[i] + 1);
      buf1.setPosition(1);
      assertEquals("Surprising return value.", I_LENGTHS[i],
        OrderedBytes.encodeNumeric(buf1, I_VALS[i], ord));
      assertEquals("Broken test: serialization did not consume entire buffer.",
        buf1.getLength(), buf1.getPosition());
      assertEquals("Surprising serialized length.", I_LENGTHS[i], buf1.getPosition() - 1);
      assertEquals("Buffer underflow.", 0, a[0]);
      assertEquals("Buffer underflow.", 0, a[1]);
      assertEquals("Buffer overflow.", 0, a[a.length - 1]);
      buf1.setPosition(1);
      assertEquals("Surprising return value.", I_LENGTHS[i], OrderedBytes.skip(buf1));
      assertEquals("Did not skip enough bytes.", I_LENGTHS[i], buf1.getPosition() - 1);
      buf1.setPosition(1);
      assertEquals("Deserialization failed.", I_VALS[i].longValue(),
        OrderedBytes.decodeNumericAsLong(buf1));
      assertEquals("Did not consume enough bytes.", I_LENGTHS[i], buf1.getPosition() - 1);
    }
  }
  // Sort order: lexicographic order of encodings must match value order.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    byte[][] encoded = new byte[I_VALS.length][];
    PositionedByteRange pbr = new SimplePositionedMutableByteRange();
    for (int i = 0; i < I_VALS.length; i++) {
      encoded[i] = new byte[I_LENGTHS[i]];
      OrderedBytes.encodeNumeric(pbr.set(encoded[i]), I_VALS[i], ord);
    }
    Arrays.sort(encoded, Bytes.BYTES_COMPARATOR);
    Long[] sortedVals = Arrays.copyOf(I_VALS, I_VALS.length);
    if (ord == Order.ASCENDING) Arrays.sort(sortedVals);
    else Arrays.sort(sortedVals, Collections.reverseOrder());
    for (int i = 0; i < sortedVals.length; i++) {
      pbr.set(encoded[i]);
      long decoded = OrderedBytes.decodeNumericAsLong(pbr);
      assertEquals(String.format(
        "Encoded representations do not preserve natural order: <%s>, <%s>, %s",
        sortedVals[i], decoded, ord), sortedVals[i].longValue(), decoded);
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Test BlobVar encoding.
 * <p>
 * Samples include the empty blob, ASCII strings of lengths 1-8 and 12, and
 * 0xaa/0x55 bit-pattern blobs of 7, 12, and 14 bytes (exercising lengths
 * around the 7-bytes-per-8-encoded-bytes packing). Round-trips each through
 * encode/skip/decode with guard bytes, then verifies encoded byte[]s sort in
 * the values' natural (or reversed) order.
 */
@Test
public void testBlobVar() {
  byte[][] vals = { "".getBytes(), "foo".getBytes(), "foobarbazbub".getBytes(),
    { (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa,
      (byte) 0xaa },
    { (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa,
      (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa },
    { (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa,
      (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa, (byte) 0xaa,
      (byte) 0xaa, (byte) 0xaa },
    { (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55,
      (byte) 0x55 },
    { (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55,
      (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55 },
    { (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55,
      (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55, (byte) 0x55,
      (byte) 0x55, (byte) 0x55 },
    "1".getBytes(), "22".getBytes(), "333".getBytes(), "4444".getBytes(), "55555".getBytes(),
    "666666".getBytes(), "7777777".getBytes(), "88888888".getBytes() };
  // Round trip: encode at offset 1 with zero guard bytes on either side.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (byte[] val : vals) {
      int expectedLen = OrderedBytes.blobVarEncodedLength(val.length);
      byte[] a = new byte[expectedLen + 3];
      PositionedByteRange buf1 = new SimplePositionedMutableByteRange(a, 1, expectedLen + 1);
      buf1.setPosition(1);
      assertEquals("Surprising return value.", expectedLen,
        OrderedBytes.encodeBlobVar(buf1, val, ord));
      assertEquals("Broken test: serialization did not consume entire buffer.",
        buf1.getLength(), buf1.getPosition());
      assertEquals("Surprising serialized length.", expectedLen, buf1.getPosition() - 1);
      assertEquals("Buffer underflow.", 0, a[0]);
      assertEquals("Buffer underflow.", 0, a[1]);
      assertEquals("Buffer overflow.", 0, a[a.length - 1]);
      buf1.setPosition(1);
      assertEquals("Surprising return value.", expectedLen, OrderedBytes.skip(buf1));
      assertEquals("Did not skip enough bytes.", expectedLen, buf1.getPosition() - 1);
      buf1.setPosition(1);
      assertArrayEquals("Deserialization failed.", val, OrderedBytes.decodeBlobVar(buf1));
      assertEquals("Did not consume enough bytes.", expectedLen, buf1.getPosition() - 1);
    }
  }
  // Sort order: lexicographic order of encodings must match value order.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    byte[][] encoded = new byte[vals.length][];
    PositionedByteRange pbr = new SimplePositionedMutableByteRange();
    for (int i = 0; i < vals.length; i++) {
      encoded[i] = new byte[OrderedBytes.blobVarEncodedLength(vals[i].length)];
      OrderedBytes.encodeBlobVar(pbr.set(encoded[i]), vals[i], ord);
    }
    Arrays.sort(encoded, Bytes.BYTES_COMPARATOR);
    byte[][] sortedVals = Arrays.copyOf(vals, vals.length);
    if (ord == Order.ASCENDING) Arrays.sort(sortedVals, Bytes.BYTES_COMPARATOR);
    else Arrays.sort(sortedVals, Collections.reverseOrder(Bytes.BYTES_COMPARATOR));
    for (int i = 0; i < sortedVals.length; i++) {
      pbr.set(encoded[i]);
      byte[] decoded = OrderedBytes.decodeBlobVar(pbr);
      assertArrayEquals(String.format(
        "Encoded representations do not preserve natural order: <%s>, <%s>, %s",
        Arrays.toString(sortedVals[i]), Arrays.toString(decoded), ord), sortedVals[i], decoded);
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test generic skip logic: for one value of every encodable type, verify that
 * OrderedBytes.skip advances the buffer by exactly the number of bytes the
 * corresponding encoder reported. Each case follows the same pattern: encode,
 * rewind to 0, assert skip() == encoded length.
 */
@Test
public void testSkip() {
  BigDecimal longMax = BigDecimal.valueOf(Long.MAX_VALUE);
  double negInf = Double.NEGATIVE_INFINITY;
  BigDecimal negLarge = longMax.multiply(longMax).negate();
  BigDecimal negMed = new BigDecimal("-10.0");
  BigDecimal negSmall = new BigDecimal("-0.0010");
  long zero = 0l;
  BigDecimal posSmall = negSmall.negate();
  BigDecimal posMed = negMed.negate();
  BigDecimal posLarge = negLarge.negate();
  double posInf = Double.POSITIVE_INFINITY;
  double nan = Double.NaN;
  byte int8 = 100;
  short int16 = 100;
  int int32 = 100;
  long int64 = 100l;
  float float32 = 100.0f;
  double float64 = 100.0d;
  String text = "hello world.";
  byte[] blobVar = Bytes.toBytes("foo");
  byte[] blobCopy = Bytes.toBytes("bar");
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    PositionedByteRange buff = new SimplePositionedMutableByteRange(30);
    int o;
    // Null.
    o = OrderedBytes.encodeNull(buff, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    // Numeric: -inf, large/medium/small negatives, zero, positives, +inf, NaN.
    buff.setPosition(0);
    o = OrderedBytes.encodeNumeric(buff, negInf, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeNumeric(buff, negLarge, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeNumeric(buff, negMed, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeNumeric(buff, negSmall, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeNumeric(buff, zero, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeNumeric(buff, posSmall, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeNumeric(buff, posMed, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeNumeric(buff, posLarge, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeNumeric(buff, posInf, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeNumeric(buff, nan, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    // Fixed-width integers and floats.
    buff.setPosition(0);
    o = OrderedBytes.encodeInt8(buff, int8, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeInt16(buff, int16, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeInt32(buff, int32, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeInt64(buff, int64, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeFloat32(buff, float32, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeFloat64(buff, float64, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    // String and blob types.
    buff.setPosition(0);
    o = OrderedBytes.encodeString(buff, text, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    buff.setPosition(0);
    o = OrderedBytes.encodeBlobVar(buff, blobVar, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
    // BlobCopy consumes the remainder of the range, so size the range exactly.
    buff.set(blobCopy.length + (Order.ASCENDING == ord ? 1 : 2));
    o = OrderedBytes.encodeBlobCopy(buff, blobCopy, ord);
    buff.setPosition(0);
    assertEquals(o, OrderedBytes.skip(buff));
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/** * Test float64 encoding. */ @Test public void testFloat64(){ Double[] vals={Double.MIN_VALUE,Double.MIN_VALUE + 1.0,0.0,Double.MAX_VALUE / 2.0,Double.MAX_VALUE}; for ( Order ord : new Order[]{Order.ASCENDING,Order.DESCENDING}) { for (int i=0; i < vals.length; i++) { byte[] a=new byte[9 + 3]; PositionedByteRange buf1=new SimplePositionedMutableByteRange(a,1,9 + 1); buf1.setPosition(1); assertEquals("Surprising return value.",9,OrderedBytes.encodeFloat64(buf1,vals[i],ord)); assertEquals("Broken test: serialization did not consume entire buffer.",buf1.getLength(),buf1.getPosition()); assertEquals("Surprising serialized length.",9,buf1.getPosition() - 1); assertEquals("Buffer underflow.",0,a[0]); assertEquals("Buffer underflow.",0,a[1]); assertEquals("Buffer overflow.",0,a[a.length - 1]); buf1.setPosition(1); assertEquals("Surprising return value.",9,OrderedBytes.skip(buf1)); assertEquals("Did not skip enough bytes.",9,buf1.getPosition() - 1); buf1.setPosition(1); assertEquals("Deserialization failed.",Double.doubleToLongBits(vals[i].doubleValue()),Double.doubleToLongBits(OrderedBytes.decodeFloat64(buf1))); assertEquals("Did not consume enough bytes.",9,buf1.getPosition() - 1); } } for ( Order ord : new Order[]{Order.ASCENDING,Order.DESCENDING}) { byte[][] encoded=new byte[vals.length][9]; PositionedByteRange pbr=new SimplePositionedMutableByteRange(); for (int i=0; i < vals.length; i++) { OrderedBytes.encodeFloat64(pbr.set(encoded[i]),vals[i],ord); } Arrays.sort(encoded,Bytes.BYTES_COMPARATOR); Double[] sortedVals=Arrays.copyOf(vals,vals.length); if (ord == Order.ASCENDING) Arrays.sort(sortedVals); else Arrays.sort(sortedVals,Collections.reverseOrder()); for (int i=0; i < sortedVals.length; i++) { double decoded=OrderedBytes.decodeFloat64(pbr.set(encoded[i])); assertEquals(String.format("Encoded representations do not preserve natural order: <%s>, <%s>, 
%s",sortedVals[i],decoded,ord),Double.doubleToLongBits(sortedVals[i].doubleValue()),Double.doubleToLongBits(decoded)); } } }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Fill gaps in Numeric encoding testing: round-trips the BD_VALS BigDecimal
 * samples (expected lengths BD_LENGTHS, null included) through
 * encodeNumeric/skip/decodeNumericAsBigDecimal using an offset buffer with
 * zero guard bytes on either side.
 */
@Test
public void testNumericOther() {
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (int i = 0; i < BD_VALS.length; i++) {
      byte[] a = new byte[BD_LENGTHS[i] + 3];
      PositionedByteRange buf1 = new SimplePositionedMutableByteRange(a, 1, BD_LENGTHS[i] + 1);
      buf1.setPosition(1);
      assertEquals("Surprising return value.", BD_LENGTHS[i],
        OrderedBytes.encodeNumeric(buf1, BD_VALS[i], ord));
      assertEquals("Broken test: serialization did not consume entire buffer.",
        buf1.getLength(), buf1.getPosition());
      assertEquals("Surprising serialized length.", BD_LENGTHS[i], buf1.getPosition() - 1);
      assertEquals("Buffer underflow.", 0, a[0]);
      assertEquals("Buffer underflow.", 0, a[1]);
      assertEquals("Buffer overflow.", 0, a[a.length - 1]);
      buf1.setPosition(1);
      assertEquals("Surprising return value.", BD_LENGTHS[i], OrderedBytes.skip(buf1));
      assertEquals("Did not skip enough bytes.", BD_LENGTHS[i], buf1.getPosition() - 1);
      buf1.setPosition(1);
      BigDecimal decoded = OrderedBytes.decodeNumericAsBigDecimal(buf1);
      if (null == BD_VALS[i]) {
        // A null value must decode back to null.
        assertEquals(BD_VALS[i], decoded);
      } else {
        // compareTo rather than equals: scale differences are acceptable.
        assertEquals("Deserialization failed.", 0, BD_VALS[i].compareTo(decoded));
      }
      assertEquals("Did not consume enough bytes.", BD_LENGTHS[i], buf1.getPosition() - 1);
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Test real Numeric encoding. Example input values come from the reference
 * wiki page. Round-trips each D_VALS entry (expected length D_LENGTHS)
 * through encodeNumeric/skip/decodeNumericAsDouble (within MIN_EPSILON) in an
 * offset buffer with zero guard bytes, then verifies the encoded byte[]s sort
 * in the values' natural (or reversed) order.
 */
@Test
public void testNumericReal() {
  // Round trip: encode at offset 1 with guard bytes on either side.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (int i = 0; i < D_VALS.length; i++) {
      byte[] a = new byte[D_LENGTHS[i] + 3];
      PositionedByteRange buf1 = new SimplePositionedMutableByteRange(a, 1, D_LENGTHS[i] + 1);
      buf1.setPosition(1);
      assertEquals("Surprising return value.", D_LENGTHS[i],
        OrderedBytes.encodeNumeric(buf1, D_VALS[i], ord));
      assertEquals("Broken test: serialization did not consume entire buffer.",
        buf1.getLength(), buf1.getPosition());
      assertEquals("Surprising serialized length.", D_LENGTHS[i], buf1.getPosition() - 1);
      assertEquals("Buffer underflow.", 0, a[0]);
      assertEquals("Buffer underflow.", 0, a[1]);
      assertEquals("Buffer overflow.", 0, a[a.length - 1]);
      buf1.setPosition(1);
      assertEquals("Surprising return value.", D_LENGTHS[i], OrderedBytes.skip(buf1));
      assertEquals("Did not skip enough bytes.", D_LENGTHS[i], buf1.getPosition() - 1);
      buf1.setPosition(1);
      assertEquals("Deserialization failed.", D_VALS[i].doubleValue(),
        OrderedBytes.decodeNumericAsDouble(buf1), MIN_EPSILON);
      assertEquals("Did not consume enough bytes.", D_LENGTHS[i], buf1.getPosition() - 1);
    }
  }
  // Sort order: lexicographic order of encodings must match value order.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    byte[][] encoded = new byte[D_VALS.length][];
    PositionedByteRange pbr = new SimplePositionedMutableByteRange();
    for (int i = 0; i < D_VALS.length; i++) {
      encoded[i] = new byte[D_LENGTHS[i]];
      OrderedBytes.encodeNumeric(pbr.set(encoded[i]), D_VALS[i], ord);
    }
    Arrays.sort(encoded, Bytes.BYTES_COMPARATOR);
    Double[] sortedVals = Arrays.copyOf(D_VALS, D_VALS.length);
    if (ord == Order.ASCENDING) Arrays.sort(sortedVals);
    else Arrays.sort(sortedVals, Collections.reverseOrder());
    for (int i = 0; i < sortedVals.length; i++) {
      pbr.set(encoded[i]);
      double decoded = OrderedBytes.decodeNumericAsDouble(pbr);
      assertEquals(String.format(
        "Encoded representations do not preserve natural order: <%s>, <%s>, %s",
        sortedVals[i], decoded, ord), sortedVals[i].doubleValue(), decoded, MIN_EPSILON);
    }
  }
}

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verify Real and Int encodings are compatible: the same value encoded via
 * the long and BigDecimal overloads of encodeNumeric must produce identical
 * bytes, and must decode correctly through both decodeNumericAsLong and
 * decodeNumericAsDouble.
 * NOTE(review): both the "integer" and "real" buffers below are filled via
 * the same Long argument, so the "Integer and real encodings differ."
 * assertion compares two identical encodings — possibly the second encode was
 * meant to pass a double; confirm intent before changing.
 */
@Test
public void testNumericIntRealCompatibility() {
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (int i = 0; i < I_VALS.length; i++) {
      PositionedByteRange asInt = new SimplePositionedMutableByteRange(I_LENGTHS[i]);
      PositionedByteRange asReal = new SimplePositionedMutableByteRange(I_LENGTHS[i]);
      OrderedBytes.encodeNumeric(asInt, I_VALS[i], ord);
      OrderedBytes.encodeNumeric(asReal, I_VALS[i], ord);
      assertArrayEquals("Integer and real encodings differ.", asInt.getBytes(),
        asReal.getBytes());
      asInt.setPosition(0);
      asReal.setPosition(0);
      assertEquals((long) I_VALS[i], OrderedBytes.decodeNumericAsLong(asInt));
      assertEquals((long) I_VALS[i], (long) OrderedBytes.decodeNumericAsDouble(asReal));
      // The BigDecimal overload must agree byte-for-byte as well.
      BigDecimal bd = BigDecimal.valueOf(I_VALS[i]);
      PositionedByteRange asBigDecimal = new SimplePositionedMutableByteRange(I_LENGTHS[i]);
      OrderedBytes.encodeNumeric(asBigDecimal, bd, ord);
      assertArrayEquals("Integer and BigDecimal encodings differ.", asInt.getBytes(),
        asBigDecimal.getBytes());
      asInt.setPosition(0);
      assertEquals("Value not preserved when decoding as Long", 0,
        bd.compareTo(BigDecimal.valueOf(OrderedBytes.decodeNumericAsLong(asInt))));
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Round-trip and sort-order checks for float32 OrderedBytes encoding (5-byte
 * serialized form). Comparisons go through Float.floatToIntBits so exact bit
 * patterns are compared rather than floating-point equality.
 */
@Test
public void testFloat32() {
  Float[] vals =
    { Float.MIN_VALUE, Float.MIN_VALUE + 1.0f, 0.0f, Float.MAX_VALUE / 2.0f, Float.MAX_VALUE };

  // Encode/skip/decode round trip at a non-zero offset with guard bytes.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    for (Float val : vals) {
      byte[] backing = new byte[5 + 3];
      PositionedByteRange enc = new SimplePositionedMutableByteRange(backing, 1, 5 + 1);
      enc.setPosition(1);
      assertEquals("Surprising return value.", 5, OrderedBytes.encodeFloat32(enc, val, ord));
      assertEquals("Broken test: serialization did not consume entire buffer.",
        enc.getLength(), enc.getPosition());
      assertEquals("Surprising serialized length.", 5, enc.getPosition() - 1);
      assertEquals("Buffer underflow.", 0, backing[0]);
      assertEquals("Buffer underflow.", 0, backing[1]);
      assertEquals("Buffer overflow.", 0, backing[backing.length - 1]);
      enc.setPosition(1);
      assertEquals("Surprising return value.", 5, OrderedBytes.skip(enc));
      assertEquals("Did not skip enough bytes.", 5, enc.getPosition() - 1);
      enc.setPosition(1);
      assertEquals("Deserialization failed.", Float.floatToIntBits(val.floatValue()),
        Float.floatToIntBits(OrderedBytes.decodeFloat32(enc)));
      assertEquals("Did not consume enough bytes.", 5, enc.getPosition() - 1);
    }
  }

  // Encoded byte order must match the values' natural (or reversed) order.
  for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
    byte[][] encoded = new byte[vals.length][5];
    PositionedByteRange pbr = new SimplePositionedMutableByteRange();
    for (int i = 0; i < vals.length; i++) {
      OrderedBytes.encodeFloat32(pbr.set(encoded[i]), vals[i], ord);
    }
    Arrays.sort(encoded, Bytes.BYTES_COMPARATOR);
    Float[] expectedOrder = Arrays.copyOf(vals, vals.length);
    if (ord == Order.ASCENDING) {
      Arrays.sort(expectedOrder);
    } else {
      Arrays.sort(expectedOrder, Collections.reverseOrder());
    }
    for (int i = 0; i < expectedOrder.length; i++) {
      float decoded = OrderedBytes.decodeFloat32(pbr.set(encoded[i]));
      assertEquals(String.format(
        "Encoded representations do not preserve natural order: <%s>, <%s>, %s",
        expectedOrder[i], decoded, ord),
        Float.floatToIntBits(expectedOrder[i].floatValue()), Float.floatToIntBits(decoded));
    }
  }
}

Class: org.apache.hadoop.hbase.util.TestRegionMover

InternalCallVerifier EqualityVerifier 
/**
 * Unloads all regions from region server 0 with ack mode enabled, verifies the
 * server is empty, then loads the regions back and verifies the original
 * region count is restored.
 */
@Test
public void testLoadWithAck() throws Exception {
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  HRegionServer regionServer = cluster.getRegionServer(0);
  String hostPort = regionServer.getServerName().getHostname() + ":"
    + Integer.toString(regionServer.getServerName().getPort());
  int regionsBefore = regionServer.getNumberOfOnlineRegions();
  RegionMover mover = new RegionMoverBuilder(hostPort).ack(true).maxthreads(8).build();
  mover.setConf(TEST_UTIL.getConfiguration());
  LOG.info("Unloading " + hostPort);
  mover.unload();
  assertEquals(0, regionServer.getNumberOfOnlineRegions());
  LOG.info("Successfully Unloaded\nNow Loading");
  mover.load();
  assertEquals(regionsBefore, regionServer.getNumberOfOnlineRegions());
}

InternalCallVerifier EqualityVerifier 
/**
 * Test to unload a regionserver first and then load it using no-Ack mode; we
 * check that some regions get loaded back on the region server (since no-ack
 * is best effort, an exact count cannot be asserted).
 * @throws Exception
 */
@Test
public void testLoadWithoutAck() throws Exception {
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HRegionServer regionServer = cluster.getRegionServer(0);
  String hostPort = regionServer.getServerName().getHostname() + ":"
    + Integer.toString(regionServer.getServerName().getPort());
  // Recorded but not asserted below: no-ack load is best effort.
  int regionsBefore = regionServer.getNumberOfOnlineRegions();
  RegionMoverBuilder builder = new RegionMoverBuilder(hostPort).ack(true);
  RegionMover mover = builder.build();
  mover.setConf(TEST_UTIL.getConfiguration());
  LOG.info("Unloading " + hostPort);
  mover.unload();
  assertEquals(0, regionServer.getNumberOfOnlineRegions());
  LOG.info("Successfully Unloaded\nNow Loading");
  // Reload in no-ack mode and wait until at least one region comes back.
  mover = builder.ack(false).build();
  mover.setConf(TEST_UTIL.getConfiguration());
  mover.load();
  TEST_UTIL.waitFor(5000, 500, new Predicate() {
    @Override
    public boolean evaluate() throws Exception {
      return regionServer.getNumberOfOnlineRegions() > 0;
    }
  });
}

InternalCallVerifier EqualityVerifier 
/**
 * To test that we successfully exclude a server from the unloading process:
 * the excluded server's region count must be unchanged while the source
 * server is fully unloaded.
 * @throws Exception
 */
@Test
public void testExclude() throws Exception {
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  HRegionServer excludeServer = cluster.getRegionServer(1);
  String excludeHostname = excludeServer.getServerName().getHostname();
  int excludeServerPort = excludeServer.getServerName().getPort();
  int regionsExcludeServer = excludeServer.getNumberOfOnlineRegions();
  String excludeServerName = excludeHostname + ":" + Integer.toString(excludeServerPort);
  // FIX: try-with-resources closes the writer even when write() throws;
  // previously the FileWriter leaked on exception.
  try (FileWriter fos = new FileWriter("/tmp/exclude_file")) {
    fos.write(excludeServerName);
  }
  HRegionServer regionServer = cluster.getRegionServer(0);
  String rsName = regionServer.getServerName().getHostname();
  int port = regionServer.getServerName().getPort();
  String rs = rsName + ":" + Integer.toString(port);
  RegionMoverBuilder rmBuilder =
    new RegionMoverBuilder(rs).ack(true).excludeFile("/tmp/exclude_file");
  RegionMover rm = rmBuilder.build();
  rm.setConf(TEST_UTIL.getConfiguration());
  rm.unload();
  LOG.info("Unloading " + rs);
  assertEquals(0, regionServer.getNumberOfOnlineRegions());
  assertEquals(regionsExcludeServer, cluster.getRegionServer(1).getNumberOfOnlineRegions());
  LOG.info("Before:" + regionsExcludeServer + " After:"
    + cluster.getRegionServer(1).getNumberOfOnlineRegions());
}

InternalCallVerifier EqualityVerifier 
/**
 * Unloads a region server in ack mode and verifies that it ends up with
 * zero online regions.
 */
@Test
public void testUnloadWithAck() throws Exception {
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HRegionServer server = cluster.getRegionServer(0);
  final String hostname = server.getServerName().getHostname();
  final int port = server.getServerName().getPort();
  final String hostnamePort = hostname + ":" + Integer.toString(port);
  final RegionMoverBuilder builder = new RegionMoverBuilder(hostnamePort).ack(true);
  final RegionMover mover = builder.build();
  mover.setConf(TEST_UTIL.getConfiguration());
  mover.unload();
  LOG.info("Unloading " + hostnamePort);
  // With ack=true the unload is synchronous, so the server must be empty now.
  assertEquals(0, server.getNumberOfOnlineRegions());
}

Class: org.apache.hadoop.hbase.util.TestRegionSizeCalculator

InternalCallVerifier EqualityVerifier 
/**
 * Regions of the located table resolve to their mocked sizes (in megabytes);
 * a region belonging to another table reports size 0 and is excluded from
 * the size map.
 */
@Test
public void testSimpleTestCase() throws Exception {
  RegionLocator regionLocator = mockRegionLocator("region1", "region2", "region3");
  // Two mocked servers; "otherTableRegion" lives on a server but is not in the locator.
  Admin admin = mockAdmin(
      mockServer(mockRegion("region1", 123), mockRegion("region3", 1232)),
      mockServer(mockRegion("region2", 54321), mockRegion("otherTableRegion", 110)));
  RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin);
  assertEquals(123 * megabyte, calculator.getRegionSize("region1".getBytes()));
  assertEquals(54321 * megabyte, calculator.getRegionSize("region2".getBytes()));
  assertEquals(1232 * megabyte, calculator.getRegionSize("region3".getBytes()));
  // Region from a different table: reported as 0 ...
  assertEquals(0 * megabyte, calculator.getRegionSize("otherTableRegion".getBytes()));
  // ... and absent from the map, which holds only the three located regions.
  assertEquals(3, calculator.getRegionSizeMap().size());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * When the calculator is disabled via configuration, it must return 0 for
 * every size request and expose an empty size map.
 */
@Test
public void testDisabled() throws Exception {
  String regionName = "cz.goout:/index.html";
  RegionLocator table = mockRegionLocator(regionName);
  Admin admin = mockAdmin(mockServer(mockRegion(regionName, 999)));
  // Sanity check: with the calculator enabled the mocked size is visible.
  RegionSizeCalculator calculator = new RegionSizeCalculator(table, admin);
  assertEquals(999 * megabyte, calculator.getRegionSize(regionName.getBytes()));
  // Disable and rebuild: all sizes collapse to 0 and the map is empty.
  configuration.setBoolean(RegionSizeCalculator.ENABLE_REGIONSIZECALCULATOR, false);
  RegionSizeCalculator disabledCalculator = new RegionSizeCalculator(table, admin);
  assertEquals(0 * megabyte, disabledCalculator.getRegionSize(regionName.getBytes()));
  assertEquals(0, disabledCalculator.getRegionSizeMap().size());
}

Class: org.apache.hadoop.hbase.util.TestRegionSplitCalculator

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** A calculator with no ranges added produces an empty coverage dump. */
@Test
public void testSplitCalculatorNoEdge() {
  RegionSplitCalculator calc = new RegionSplitCalculator(cmp);
  Multimap coverage = calc.calcCoverage();
  LOG.info("Empty");
  String rendered = dump(calc.getSplits(), coverage);
  checkDepths(calc.getSplits(), coverage);
  assertEquals("", rendered);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Ranges using the empty byte[] as the open start/end-of-table marker are
 * handled: coverage is depth 1 everywhere with a trailing "null" terminator row.
 */
@Test
public void testBeginEndMarker() {
  RegionSplitCalculator sc = new RegionSplitCalculator(cmp);
  // "" is the special open marker for first-region start / last-region end keys.
  sc.add(new SimpleRange(Bytes.toBytes(""), Bytes.toBytes("A")));
  sc.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B")));
  sc.add(new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("")));
  Multimap regions = sc.calcCoverage();
  LOG.info("Special cases -- empty");
  String res = dump(sc.getSplits(), regions);
  checkDepths(sc.getSplits(), regions, 1, 1, 1, 0);
  assertEquals(":\t[, A]\t\n" + "A:\t[A, B]\t\n" + "B:\t[B, ]\t\n" + "null:\t\n", res);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** A single range [A,B) covers A at depth 1 and B (its end key) at depth 0. */
@Test
public void testSplitCalculatorSingleEdge() {
  SimpleRange only = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B"));
  RegionSplitCalculator calc = new RegionSplitCalculator(cmp);
  calc.add(only);
  Multimap coverage = calc.calcCoverage();
  LOG.info("Single edge");
  String rendered = dump(calc.getSplits(), coverage);
  checkDepths(calc.getSplits(), coverage, 1, 0);
  assertEquals("A:\t[A, B]\t\nB:\t\n", rendered);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * A mix of overlapping ranges, duplicates of start keys, and holes; verifies
 * the per-split depths and the exact coverage dump.
 */
@Test
public void testComplex() {
  RegionSplitCalculator sc = new RegionSplitCalculator(cmp);
  sc.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("Am")));
  sc.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C")));
  sc.add(new SimpleRange(Bytes.toBytes("Am"), Bytes.toBytes("C")));
  sc.add(new SimpleRange(Bytes.toBytes("D"), Bytes.toBytes("E")));
  sc.add(new SimpleRange(Bytes.toBytes("F"), Bytes.toBytes("G")));
  sc.add(new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("E")));
  sc.add(new SimpleRange(Bytes.toBytes("H"), Bytes.toBytes("I")));
  sc.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B")));
  Multimap regions = sc.calcCoverage();
  LOG.info("Something fairly complex");
  String res = dump(sc.getSplits(), regions);
  // Depths per split point: triple overlap around A..B, holes at E/G/I (depth 0).
  checkDepths(sc.getSplits(), regions, 3, 3, 3, 1, 2, 0, 1, 0, 1, 0);
  assertEquals("A:\t[A, Am]\t[A, B]\t[A, C]\t\n" + "Am:\t[A, B]\t[A, C]\t[Am, C]\t\n"
      + "B:\t[A, C]\t[Am, C]\t[B, E]\t\n" + "C:\t[B, E]\t\n" + "D:\t[B, E]\t[D, E]\t\n"
      + "E:\t\n" + "F:\t[F, G]\t\n" + "G:\t\n" + "H:\t[H, I]\t\n" + "I:\t\n", res);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** A degenerate (empty) range [A,A) still registers a single depth-1 split at A. */
@Test
public void testSplitCalculatorDegenerateEdge() {
  SimpleRange degenerate = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("A"));
  RegionSplitCalculator calc = new RegionSplitCalculator(cmp);
  calc.add(degenerate);
  Multimap coverage = calc.calcCoverage();
  LOG.info("Single empty edge");
  String rendered = dump(calc.getSplits(), coverage);
  checkDepths(calc.getSplits(), coverage, 1);
  assertEquals("A:\t[A, A]\t\n", rendered);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Two ranges sharing a start key ([B,C) and [B,D)): B is covered at depth 2
 * and [B,D) continues past C's endpoint.
 */
@Test
public void testSplitCalculatorOverEndpoint() {
  SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B"));
  SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("C"));
  SimpleRange c = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("D"));
  RegionSplitCalculator sc = new RegionSplitCalculator(cmp);
  sc.add(a);
  sc.add(b);
  sc.add(c);
  Multimap regions = sc.calcCoverage();
  LOG.info("AB, BD covers BC");
  String res = dump(sc.getSplits(), regions);
  checkDepths(sc.getSplits(), regions, 1, 2, 1, 0);
  assertEquals("A:\t[A, B]\t\n" + "B:\t[B, C]\t[B, D]\t\n" + "C:\t[B, D]\t\n" + "D:\t\n", res);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Two overlapping ranges that share no start/end keys ([A,C) and [B,D)):
 * the overlap region starting at B has depth 2.
 */
@Test
public void testSplitCalculatorOverreach() {
  SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C"));
  SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("D"));
  RegionSplitCalculator sc = new RegionSplitCalculator(cmp);
  sc.add(a);
  sc.add(b);
  Multimap regions = sc.calcCoverage();
  LOG.info("AC and BD overlap but share no start/end keys");
  String res = dump(sc.getSplits(), regions);
  checkDepths(sc.getSplits(), regions, 1, 2, 1, 0);
  assertEquals("A:\t[A, C]\t\n" + "B:\t[A, C]\t[B, D]\t\n" + "C:\t[B, D]\t\n" + "D:\t\n", res);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Two identical ranges [A,C): both are retained (distinguished by their
 * tiebreaker) and the span A..C is covered at depth 2.
 */
@Test
public void testSplitCalculatorEq() {
  SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C"));
  SimpleRange b = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C"));
  // Log the tiebreakers that keep the two equal ranges distinct in the calculator.
  LOG.info(a.tiebreaker + " - " + b.tiebreaker);
  RegionSplitCalculator sc = new RegionSplitCalculator(cmp);
  sc.add(a);
  sc.add(b);
  Multimap regions = sc.calcCoverage();
  LOG.info("AC and AC overlap completely");
  String res = dump(sc.getSplits(), regions);
  checkDepths(sc.getSplits(), regions, 2, 0);
  assertEquals("A:\t[A, C]\t[A, C]\t\n" + "C:\t\n", res);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * A large range [A,C) fully covering two abutting ranges [A,B) and [B,C):
 * every split point up to C has depth 2.
 */
@Test
public void testSplitCalculatorCoverSplit() {
  SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B"));
  SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("C"));
  SimpleRange c = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C"));
  RegionSplitCalculator sc = new RegionSplitCalculator(cmp);
  sc.add(a);
  sc.add(b);
  sc.add(c);
  Multimap regions = sc.calcCoverage();
  LOG.info("AC covers AB, BC");
  String res = dump(sc.getSplits(), regions);
  checkDepths(sc.getSplits(), regions, 2, 2, 0);
  assertEquals("A:\t[A, B]\t[A, C]\t\n" + "B:\t[A, C]\t[B, C]\t\n" + "C:\t\n", res);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * A hole in coverage: [A,B),[B,C) then a gap before [E,F). The gap shows up
 * as a depth-0 split point at C.
 */
@Test
public void testSplitCalculatorHoles() {
  SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B"));
  SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("C"));
  SimpleRange c = new SimpleRange(Bytes.toBytes("E"), Bytes.toBytes("F"));
  RegionSplitCalculator sc = new RegionSplitCalculator(cmp);
  sc.add(a);
  sc.add(b);
  sc.add(c);
  Multimap regions = sc.calcCoverage();
  LOG.info("Hole between C and E");
  String res = dump(sc.getSplits(), regions);
  checkDepths(sc.getSplits(), regions, 1, 1, 0, 1, 0);
  assertEquals("A:\t[A, B]\t\n" + "B:\t[B, C]\t\n" + "C:\t\n" + "E:\t[E, F]\t\n"
      + "F:\t\n", res);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Three abutting ranges [A,B),[B,C),[C,D): every split point is covered at
 * depth 1 and D (the final end key) at depth 0.
 */
@Test
public void testSplitCalculator() {
  SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B"));
  SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("C"));
  SimpleRange c = new SimpleRange(Bytes.toBytes("C"), Bytes.toBytes("D"));
  RegionSplitCalculator sc = new RegionSplitCalculator(cmp);
  sc.add(a);
  sc.add(b);
  sc.add(c);
  Multimap regions = sc.calcCoverage();
  LOG.info("Standard");
  String res = dump(sc.getSplits(), regions);
  checkDepths(sc.getSplits(), regions, 1, 1, 1, 0);
  // Fixed: arguments were swapped as (actual, expected). JUnit's contract is
  // (expected, actual); the swap produced misleading failure messages and was
  // inconsistent with every sibling test in this class.
  assertEquals("A:\t[A, B]\t\n" + "B:\t[B, C]\t\n" + "C:\t[C, D]\t\n" + "D:\t\n", res);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** A backwards range (start key after end key) is dropped: coverage dump is empty. */
@Test
public void testSplitCalculatorBackwards() {
  SimpleRange backwards = new SimpleRange(Bytes.toBytes("C"), Bytes.toBytes("A"));
  RegionSplitCalculator calc = new RegionSplitCalculator(cmp);
  calc.add(backwards);
  Multimap coverage = calc.calcCoverage();
  LOG.info("CA is backwards");
  String rendered = dump(calc.getSplits(), coverage);
  checkDepths(calc.getSplits(), coverage);
  assertEquals("", rendered);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Ranges [A,C) and [B,C) sharing an end key: the overlap starting at B has
 * depth 2.
 */
@Test
public void testSplitCalculatorCeil() {
  SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C"));
  SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("C"));
  RegionSplitCalculator sc = new RegionSplitCalculator(cmp);
  sc.add(a);
  sc.add(b);
  Multimap regions = sc.calcCoverage();
  LOG.info("AC and BC overlap in the end");
  String res = dump(sc.getSplits(), regions);
  checkDepths(sc.getSplits(), regions, 1, 2, 0);
  assertEquals("A:\t[A, C]\t\n" + "B:\t[A, C]\t[B, C]\t\n" + "C:\t\n", res);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Ranges [A,C) and [A,B) sharing a start key: the overlap starting at A has
 * depth 2, then [A,C) continues alone.
 */
@Test
public void testSplitCalculatorFloor() {
  SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C"));
  SimpleRange b = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B"));
  RegionSplitCalculator sc = new RegionSplitCalculator(cmp);
  sc.add(a);
  sc.add(b);
  Multimap regions = sc.calcCoverage();
  LOG.info("AC and AB overlap in the beginning");
  String res = dump(sc.getSplits(), regions);
  checkDepths(sc.getSplits(), regions, 2, 1, 0);
  assertEquals("A:\t[A, B]\t[A, C]\t\n" + "B:\t[A, C]\t\n" + "C:\t\n", res);
}

Class: org.apache.hadoop.hbase.util.TestRegionSplitter

InternalCallVerifier EqualityVerifier 
/**
 * Unit tests for the UniformSplit algorithm. Makes sure it divides up the
 * space of keys (uniform over raw bytes) in the way that we expect.
 */
@Test
public void unitTestUniformSplit() {
  UniformSplit splitter = new UniformSplit();
  // Splitting into fewer than 2 regions must be rejected.
  try {
    splitter.split(1);
    throw new AssertionError("Splitting into <2 regions should have thrown exception");
  } catch (IllegalArgumentException e) {
    // expected
  }
  byte[][] twoRegionsSplits = splitter.split(2);
  assertEquals(1, twoRegionsSplits.length);
  // Midpoint of the full byte-key space is 0x80 00 ... 00.
  assertArrayEquals(twoRegionsSplits[0], new byte[]{(byte) 0x80, 0, 0, 0, 0, 0, 0, 0});
  byte[][] threeRegionsSplits = splitter.split(3);
  assertEquals(2, threeRegionsSplits.length);
  byte[] expectedSplit0 = new byte[]{0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55};
  assertArrayEquals(expectedSplit0, threeRegionsSplits[0]);
  byte[] expectedSplit1 = new byte[]{(byte) 0xAA, (byte) 0xAA, (byte) 0xAA, (byte) 0xAA,
      (byte) 0xAA, (byte) 0xAA, (byte) 0xAA, (byte) 0xAA};
  assertArrayEquals(expectedSplit1, threeRegionsSplits[1]);
  // Two-key split picks the midpoint.
  byte[] splitPoint = splitter.split(new byte[]{0x10}, new byte[]{0x30});
  assertArrayEquals(new byte[]{0x20}, splitPoint);
  // NOTE(review): xFF is presumably a class-level (byte) 0xFF constant — defined
  // outside this view; confirm against the enclosing class.
  byte[] lastRow = new byte[]{xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF};
  assertArrayEquals(lastRow, splitter.lastRow());
  byte[] firstRow = ArrayUtils.EMPTY_BYTE_ARRAY;
  assertArrayEquals(firstRow, splitter.firstRow());
  // Open-start split.
  splitPoint = splitter.split(firstRow, new byte[]{0x20});
  assertArrayEquals(splitPoint, new byte[]{0x10});
  // Split near the top of the key space.
  splitPoint = splitter.split(new byte[]{(byte) 0xdf, xFF, xFF, xFF, xFF, xFF, xFF, xFF},
      lastRow);
  assertArrayEquals(splitPoint, new byte[]{(byte) 0xef, xFF, xFF, xFF, xFF, xFF, xFF, xFF});
  // Adjacent keys: midpoint extends the shorter prefix with 0x80.
  splitPoint = splitter.split(new byte[]{'a', 'a', 'a'}, new byte[]{'a', 'a', 'b'});
  assertArrayEquals(splitPoint, new byte[]{'a', 'a', 'a', (byte) 0x80});
}

InternalCallVerifier EqualityVerifier 
/**
 * Unit tests for the HexStringSplit algorithm. Makes sure it divides up the
 * space of hex-string keys in the way that we expect.
 */
@Test
public void unitTestHexStringSplit() {
  HexStringSplit splitter = new HexStringSplit();
  byte[][] twoRegionsSplits = splitter.split(2);
  assertEquals(1, twoRegionsSplits.length);
  // Midpoint of the 32-bit hex space.
  assertArrayEquals(twoRegionsSplits[0], "80000000".getBytes());
  byte[][] threeRegionsSplits = splitter.split(3);
  assertEquals(2, threeRegionsSplits.length);
  byte[] expectedSplit0 = "55555555".getBytes();
  assertArrayEquals(expectedSplit0, threeRegionsSplits[0]);
  byte[] expectedSplit1 = "aaaaaaaa".getBytes();
  assertArrayEquals(expectedSplit1, threeRegionsSplits[1]);
  // Two-key split picks the arithmetic midpoint of the hex values.
  byte[] splitPoint = splitter.split("10000000".getBytes(), "30000000".getBytes());
  assertArrayEquals("20000000".getBytes(), splitPoint);
  byte[] lastRow = "ffffffff".getBytes();
  assertArrayEquals(lastRow, splitter.lastRow());
  byte[] firstRow = "00000000".getBytes();
  assertArrayEquals(firstRow, splitter.firstRow());
  splitPoint = splitter.split(firstRow, "20000000".getBytes());
  assertArrayEquals(splitPoint, "10000000".getBytes());
  splitPoint = splitter.split("dfffffff".getBytes(), lastRow);
  assertArrayEquals(splitPoint, "efffffff".getBytes());
}

Class: org.apache.hadoop.hbase.util.TestSimpleMutableByteRange

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Empty-range semantics: null, the default-constructed range, and a range
 * over byte[0] are all "empty"; an empty range has length 0 and hash 0.
 */
@Test
public void testEmpty() {
  Assert.assertTrue(SimpleMutableByteRange.isEmpty(null));
  ByteRange r = new SimpleMutableByteRange();
  Assert.assertTrue(SimpleMutableByteRange.isEmpty(r));
  Assert.assertTrue(r.isEmpty());
  r.set(new byte[0]);
  Assert.assertEquals(0, r.getBytes().length);
  Assert.assertEquals(0, r.getOffset());
  Assert.assertEquals(0, r.getLength());
  Assert.assertTrue(Bytes.equals(new byte[0], r.deepCopyToNewArray()));
  // Empty ranges compare equal to each other ...
  Assert.assertEquals(0, r.compareTo(new SimpleMutableByteRange(new byte[0], 0, 0)));
  // ... and hash to 0 by contract.
  Assert.assertEquals(0, r.hashCode());
}

InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Basic accessors on a 3-byte range: copies are deep (distinct arrays),
 * hashing is stable across deep copies, and setLength() truncates/extends
 * the visible window without touching the backing array.
 */
@Test
public void testBasics() {
  ByteRange r = new SimpleMutableByteRange(new byte[]{1, 3, 2});
  Assert.assertFalse(SimpleMutableByteRange.isEmpty(r));
  Assert.assertNotNull(r.getBytes());
  Assert.assertEquals(3, r.getBytes().length);
  Assert.assertEquals(0, r.getOffset());
  Assert.assertEquals(3, r.getLength());
  Assert.assertTrue(Bytes.equals(new byte[]{1, 3, 2}, r.deepCopyToNewArray()));
  // deepCopyToNewArray must return a NEW array, not the backing one.
  Assert.assertNotSame(r.getBytes(), r.deepCopyToNewArray());
  Assert.assertTrue(r.hashCode() > 0);
  Assert.assertEquals(r.hashCode(), r.deepCopy().hashCode());
  // Sentinel value -59 is overwritten by the copied sub-range byte (2).
  byte[] destination = new byte[]{-59};
  r.deepCopySubRangeTo(2, 1, destination, 0);
  Assert.assertTrue(Bytes.equals(new byte[]{2}, destination));
  // Shrinking then re-growing the length re-exposes the original bytes.
  r.setLength(1);
  Assert.assertTrue(Bytes.equals(new byte[]{1}, r.deepCopyToNewArray()));
  r.setLength(2);
  Assert.assertTrue(Bytes.equals(new byte[]{1, 3}, r.deepCopyToNewArray()));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips int/short/long and variable-length long values through a
 * SimpleMutableByteRange, tracking the write offset manually and then
 * replaying the same offsets for reads.
 */
@Test
public void testPutandGetPrimitiveTypes() throws Exception {
  ByteRange r = new SimpleMutableByteRange(100);
  int offset = 0;
  int i1 = 18, i2 = 2;
  short s1 = 0;
  long l1 = 1234L, l2 = 0;
  // Write phase: advance offset by the width of each value written.
  r.putInt(offset, i1);
  offset += Bytes.SIZEOF_INT;
  r.putInt(offset, i2);
  offset += Bytes.SIZEOF_INT;
  r.putShort(offset, s1);
  offset += Bytes.SIZEOF_SHORT;
  r.putLong(offset, l1);
  offset += Bytes.SIZEOF_LONG;
  // putVLong returns the number of bytes actually used (varies per value).
  int len = r.putVLong(offset, l1);
  offset += len;
  len = r.putVLong(offset, l2);
  offset += len;
  len = r.putVLong(offset, Long.MAX_VALUE);
  offset += len;
  len = r.putVLong(offset, Long.MIN_VALUE);
  // Read phase: replay from the start using the same widths.
  offset = 0;
  Assert.assertEquals(i1, r.getInt(offset));
  offset += Bytes.SIZEOF_INT;
  Assert.assertEquals(i2, r.getInt(offset));
  offset += Bytes.SIZEOF_INT;
  Assert.assertEquals(s1, r.getShort(offset));
  offset += Bytes.SIZEOF_SHORT;
  Assert.assertEquals(l1, r.getLong(offset));
  offset += Bytes.SIZEOF_LONG;
  Assert.assertEquals(l1, r.getVLong(offset));
  offset += SimpleByteRange.getVLongSize(l1);
  Assert.assertEquals(l2, r.getVLong(offset));
  offset += SimpleByteRange.getVLongSize(l2);
  Assert.assertEquals(Long.MAX_VALUE, r.getVLong(offset));
  offset += SimpleByteRange.getVLongSize(Long.MAX_VALUE);
  Assert.assertEquals(Long.MIN_VALUE, r.getVLong(offset));
}

Class: org.apache.hadoop.hbase.util.TestSimplePositionedMutableByteRange

InternalCallVerifier EqualityVerifier 
/**
 * Round-trips int/short/long and vlong values through a positioned range;
 * unlike the non-positioned variant, the range tracks its own cursor so no
 * manual offset bookkeeping is needed.
 */
@Test
public void testPutAndGetPrimitiveTypes() throws Exception {
  PositionedByteRange pbr = new SimplePositionedMutableByteRange(100);
  int i1 = 18, i2 = 2;
  short s1 = 0;
  long l1 = 1234L;
  pbr.putInt(i1);
  pbr.putInt(i2);
  pbr.putShort(s1);
  pbr.putLong(l1);
  // vlongs cover the value-size extremes: 0, a small value, MAX and MIN.
  pbr.putVLong(0);
  pbr.putVLong(l1);
  pbr.putVLong(Long.MAX_VALUE);
  pbr.putVLong(Long.MIN_VALUE);
  // Rewind and read everything back in write order.
  pbr.setPosition(0);
  Assert.assertEquals(i1, pbr.getInt());
  Assert.assertEquals(i2, pbr.getInt());
  Assert.assertEquals(s1, pbr.getShort());
  Assert.assertEquals(l1, pbr.getLong());
  Assert.assertEquals(0, pbr.getVLong());
  Assert.assertEquals(l1, pbr.getVLong());
  Assert.assertEquals(Long.MAX_VALUE, pbr.getVLong());
  Assert.assertEquals(Long.MIN_VALUE, pbr.getVLong());
}

InternalCallVerifier EqualityVerifier 
/**
 * The positioned range's put/get encoding must be byte-compatible with
 * java.nio.ByteBuffer: values written via the range read back identically
 * through a ByteBuffer wrapping the same backing array.
 */
@Test
public void testPutGetAPIsCompareWithBBAPIs() throws Exception {
  PositionedByteRange pbr = new SimplePositionedMutableByteRange(100);
  int i1 = -234, i2 = 2;
  short s1 = 0;
  long l1 = 1234L;
  pbr.putInt(i1);
  pbr.putShort(s1);
  pbr.putInt(i2);
  pbr.putLong(l1);
  // Read back through the range API ...
  pbr.setPosition(0);
  Assert.assertEquals(i1, pbr.getInt());
  Assert.assertEquals(s1, pbr.getShort());
  Assert.assertEquals(i2, pbr.getInt());
  Assert.assertEquals(l1, pbr.getLong());
  // ... and through ByteBuffer over the same bytes.
  ByteBuffer bb = ByteBuffer.wrap(pbr.getBytes());
  Assert.assertEquals(i1, bb.getInt());
  Assert.assertEquals(s1, bb.getShort());
  Assert.assertEquals(i2, bb.getInt());
  Assert.assertEquals(l1, bb.getLong());
}

InternalCallVerifier EqualityVerifier 
/**
 * Position semantics of a range windowed into a larger array (offset 1,
 * length 3 of a 5-byte array): writes land inside the window only, position
 * advances per byte, and setPosition() repositions reads.
 */
@Test
public void testPosition() {
  PositionedByteRange r = new SimplePositionedMutableByteRange(new byte[5], 1, 3);
  // Byte-at-a-time puts fill the 3-byte window; bytes outside stay 0.
  r.put(Bytes.toBytes("f")[0]).put(Bytes.toBytes("o")[0]).put(Bytes.toBytes("o")[0]);
  Assert.assertEquals(3, r.getPosition());
  Assert.assertArrayEquals(new byte[]{0, Bytes.toBytes("f")[0], Bytes.toBytes("o")[0],
      Bytes.toBytes("o")[0], 0}, r.getBytes());
  // Array puts produce the same layout.
  r.setPosition(0);
  r.put(Bytes.toBytes("f")).put(Bytes.toBytes("o")).put(Bytes.toBytes("o"));
  Assert.assertEquals(3, r.getPosition());
  Assert.assertArrayEquals(new byte[]{0, Bytes.toBytes("f")[0], Bytes.toBytes("o")[0],
      Bytes.toBytes("o")[0], 0}, r.getBytes());
  // Sequential gets advance the position.
  r.setPosition(0);
  Assert.assertEquals(Bytes.toBytes("f")[0], r.get());
  Assert.assertEquals(Bytes.toBytes("o")[0], r.get());
  Assert.assertEquals(Bytes.toBytes("o")[0], r.get());
  r.setPosition(1);
  Assert.assertEquals(Bytes.toBytes("o")[0], r.get());
  // Bulk get into a destination array.
  r.setPosition(0);
  byte[] dst = new byte[3];
  r.get(dst);
  Assert.assertArrayEquals(Bytes.toBytes("foo"), dst);
  // Positioning exactly at the end of the window is legal.
  r.setPosition(3);
}

Class: org.apache.hadoop.hbase.util.TestSortedCopyOnWriteSet

InternalCallVerifier EqualityVerifier 
/**
 * Elements added out of order come back sorted; re-adding an existing
 * element is a no-op for the set.
 */
@Test
public void testSorting() throws Exception {
  SortedCopyOnWriteSet sorted = new SortedCopyOnWriteSet();
  sorted.add("c");
  sorted.add("d");
  sorted.add("a");
  sorted.add("b");
  String[] expected = new String[]{"a", "b", "c", "d"};
  String[] snapshot = sorted.toArray(new String[4]);
  assertArrayEquals(expected, snapshot);
  // Duplicate add: size and contents must be unchanged.
  sorted.add("c");
  assertEquals(4, sorted.size());
  snapshot = sorted.toArray(new String[4]);
  assertArrayEquals(expected, snapshot);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Copy-on-write iterator isolation: an iterator obtained BEFORE a mutation
 * (remove/add/addAll/clear) keeps seeing the pre-mutation snapshot, while a
 * fresh iterator sees the new contents.
 */
@Test
public void testIteratorIsolation() throws Exception {
  SortedCopyOnWriteSet set = new SortedCopyOnWriteSet(Lists.newArrayList("a", "b", "c", "d", "e"));
  // Iterator taken before remove("c") still sees "c".
  Iterator iter = set.iterator();
  set.remove("c");
  boolean found = false;
  while (iter.hasNext() && !found) {
    found = "c".equals(iter.next());
  }
  assertTrue(found);
  // Fresh iterator no longer sees "c".
  iter = set.iterator();
  found = false;
  while (iter.hasNext() && !found) {
    found = "c".equals(iter.next());
  }
  assertFalse(found);
  // Iterator taken before add("f") must not see "f".
  iter = set.iterator();
  set.add("f");
  found = false;
  while (iter.hasNext() && !found) {
    String next = iter.next();
    found = "f".equals(next);
  }
  assertFalse(found);
  // Same for a bulk addAll.
  iter = set.iterator();
  set.addAll(Lists.newArrayList("g", "h", "i"));
  found = false;
  while (iter.hasNext() && !found) {
    String next = iter.next();
    found = "g".equals(next) || "h".equals(next) || "i".equals(next);
  }
  assertFalse(found);
  // Iterator taken before clear() still iterates the old elements.
  iter = set.iterator();
  set.clear();
  assertEquals(0, set.size());
  int size = 0;
  while (iter.hasNext()) {
    iter.next();
    size++;
  }
  assertTrue(size > 0);
}

Class: org.apache.hadoop.hbase.util.TestSortedList

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Copy-on-write iterator isolation for SortedList: an iterator obtained
 * BEFORE a mutation (remove/add/addAll/clear) keeps seeing the pre-mutation
 * snapshot, while a fresh iterator sees the new contents.
 */
@Test
public void testIteratorIsolation() throws Exception {
  SortedList list = new SortedList(Lists.newArrayList("a", "b", "c", "d", "e"), new StringComparator());
  // Iterator taken before remove("c") still sees "c".
  Iterator iter = list.iterator();
  list.remove("c");
  boolean found = false;
  while (iter.hasNext() && !found) {
    found = "c".equals(iter.next());
  }
  assertTrue(found);
  // Fresh iterator no longer sees "c".
  iter = list.iterator();
  found = false;
  while (iter.hasNext() && !found) {
    found = "c".equals(iter.next());
  }
  assertFalse(found);
  // Iterator taken before add("f") must not see "f".
  iter = list.iterator();
  list.add("f");
  found = false;
  while (iter.hasNext() && !found) {
    String next = iter.next();
    found = "f".equals(next);
  }
  assertFalse(found);
  // Same for a bulk addAll.
  iter = list.iterator();
  list.addAll(Lists.newArrayList("g", "h", "i"));
  found = false;
  while (iter.hasNext() && !found) {
    String next = iter.next();
    found = "g".equals(next) || "h".equals(next) || "i".equals(next);
  }
  assertFalse(found);
  // Iterator taken before clear() still iterates the old elements.
  iter = list.iterator();
  list.clear();
  assertEquals(0, list.size());
  int size = 0;
  while (iter.hasNext()) {
    iter.next();
    size++;
  }
  assertTrue(size > 0);
}

InternalCallVerifier EqualityVerifier 
/**
 * The inner list handed out by get() is a snapshot: clearing the SortedList
 * afterwards does not affect it.
 */
@Test
public void testRandomAccessIsolation() throws Exception {
  SortedList sorted = new SortedList(Lists.newArrayList("a", "b", "c"), new StringComparator());
  List snapshot = sorted.get();
  assertEquals("a", snapshot.get(0));
  assertEquals("b", snapshot.get(1));
  sorted.clear();
  // The pre-clear snapshot still holds all three elements.
  assertEquals("c", snapshot.get(2));
}

InternalCallVerifier EqualityVerifier 
/**
 * SortedList keeps elements ordered by the comparator; unlike a set it
 * allows duplicates, and remove() drops a single occurrence at a time.
 */
@Test
public void testSorting() throws Exception {
  SortedList list = new SortedList(new StringComparator());
  list.add("c");
  list.add("d");
  list.add("a");
  list.add("b");
  assertEquals(4, list.size());
  assertArrayEquals(new String[]{"a", "b", "c", "d"}, list.toArray(new String[4]));
  // Duplicates are allowed (list semantics, not set semantics).
  list.add("c");
  assertEquals(5, list.size());
  assertArrayEquals(new String[]{"a", "b", "c", "c", "d"}, list.toArray(new String[5]));
  list.remove("b");
  assertEquals(4, list.size());
  assertArrayEquals(new String[]{"a", "c", "c", "d"}, list.toArray(new String[4]));
  // Removing a duplicated element drops only ONE occurrence.
  list.remove("c");
  assertEquals(3, list.size());
  assertArrayEquals(new String[]{"a", "c", "d"}, list.toArray(new String[3]));
  list.remove("a");
  assertEquals(2, list.size());
  assertArrayEquals(new String[]{"c", "d"}, list.toArray(new String[2]));
}

Class: org.apache.hadoop.hbase.util.TestStealJobQueue

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * poll() drains the main (priority) queue first, then steals from the
 * steal-from queue, and finally times out with null when both are empty.
 */
@Test
public void testPoll() throws InterruptedException {
  stealJobQueue.offer(3);
  stealFromQueue.offer(10);
  stealJobQueue.offer(15);
  stealJobQueue.offer(4);
  // Main queue drains in priority order (3, 4, 15) before any stealing.
  assertEquals(3, stealJobQueue.poll(1, TimeUnit.SECONDS).intValue());
  assertEquals(4, stealJobQueue.poll(1, TimeUnit.SECONDS).intValue());
  assertEquals("always take from the main queue before trying to steal", 15,
      stealJobQueue.poll(1, TimeUnit.SECONDS).intValue());
  // Only now does the element from stealFromQueue surface.
  assertEquals(10, stealJobQueue.poll(1, TimeUnit.SECONDS).intValue());
  assertTrue(stealFromQueue.isEmpty());
  assertTrue(stealJobQueue.isEmpty());
  // Both queues empty: the timed poll must return null rather than block.
  assertNull(stealJobQueue.poll(10, TimeUnit.MILLISECONDS));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * take() mirrors poll(): the main (priority) queue is drained first, then
 * elements are stolen from the steal-from queue.
 */
@Test
public void testTake() throws InterruptedException {
  stealJobQueue.offer(3);
  stealFromQueue.offer(10);
  stealJobQueue.offer(15);
  stealJobQueue.offer(4);
  // Main queue drains in priority order (3, 4, 15) before any stealing.
  assertEquals(3, stealJobQueue.take().intValue());
  assertEquals(4, stealJobQueue.take().intValue());
  assertEquals("always take from the main queue before trying to steal", 15,
      stealJobQueue.take().intValue());
  // Only then is the element stolen from stealFromQueue.
  assertEquals(10, stealJobQueue.take().intValue());
  assertTrue(stealFromQueue.isEmpty());
  assertTrue(stealJobQueue.isEmpty());
}

Class: org.apache.hadoop.hbase.util.TestWeakObjectPool

InternalCallVerifier IdentityVerifier 
/**
 * Pooling is keyed by equals(), not identity: two distinct-but-equal String
 * keys yield the same pooled object, while a different key yields another.
 */
@Test
public void testKeys() {
  Object obj1 = pool.get("a");
  // new String("a") is DELIBERATE: a distinct object that is equals() to "a",
  // proving the pool keys on equality rather than reference identity.
  Object obj2 = pool.get(new String("a"));
  Assert.assertSame(obj1, obj2);
  Object obj3 = pool.get("b");
  Assert.assertNotSame(obj1, obj3);
}

APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Pooled objects are weakly referenced: while a strong reference exists the
 * entry survives GC + purge(); once all strong references are dropped, the
 * entry is collectable and a later get() creates a fresh object.
 * NOTE(review): System.gc() is only advisory, so this test is inherently
 * best-effort and may be flaky on some JVMs.
 */
@Test
public void testWeakReference() throws Exception {
  Object obj1 = pool.get("a");
  int hash1 = System.identityHashCode(obj1);
  // Strong ref (obj1) still held: the pooled entry must survive collection.
  System.gc();
  System.gc();
  System.gc();
  Thread.sleep(10);
  pool.purge();
  Assert.assertEquals(1, pool.size());
  Object obj2 = pool.get("a");
  Assert.assertSame(obj1, obj2);
  // Drop all strong refs: the weak entry becomes collectable.
  obj1 = null;
  obj2 = null;
  System.gc();
  System.gc();
  System.gc();
  Thread.sleep(10);
  pool.purge();
  Assert.assertEquals(0, pool.size());
  // A new get() for the same key produces a different object instance.
  Object obj3 = pool.get("a");
  Assert.assertNotEquals(hash1, System.identityHashCode(obj3));
}

Class: org.apache.hadoop.hbase.util.hbck.OfflineMetaRebuildTestCore

TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Per-test fixture: starts a fresh 3-node mini cluster, creates and
 * populates a uniquely-named test table (TABLE_BASE-&lt;idx&gt;), sanity-checks
 * meta/table row counts, then disables the table for the rebuild tests.
 */
@Before
public void setUpBefore() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.getConfiguration().setInt("dfs.datanode.max.xceivers", 9192);
  TEST_UTIL.startMiniCluster(3);
  conf = TEST_UTIL.getConfiguration();
  this.connection = ConnectionFactory.createConnection(conf);
  // Cluster must start with no user tables.
  assertEquals(0, TEST_UTIL.getHBaseAdmin().listTables().length);
  // tableIdx makes each test run use a distinct table name.
  table = TableName.valueOf(TABLE_BASE + "-" + tableIdx);
  tableIdx++;
  htbl = setupTable(table);
  populateTable(htbl);
  // 5 meta rows and 16 data rows are the fixture's expected baseline.
  assertEquals(5, scanMeta());
  LOG.info("Table " + table + " has " + tableRowCount(conf, table) + " entries.");
  assertEquals(16, tableRowCount(conf, table));
  TEST_UTIL.getHBaseAdmin().disableTable(table);
  assertEquals(1, TEST_UTIL.getHBaseAdmin().listTables().length);
}

Class: org.apache.hadoop.hbase.util.hbck.TestOfflineMetaRebuildBase

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end meta rebuild: wipes meta, confirms fsck reports the orphaned
 * regions, rebuilds meta offline, restarts the cluster, and verifies that
 * meta, table descriptors, and row counts are all restored.
 */
@SuppressWarnings("deprecation")
@Test(timeout = 120000)
public void testMetaRebuild() throws Exception {
  wipeOutMeta();
  assertEquals(1, scanMeta());
  assertErrors(doFsck(conf, false), new ERROR_CODE[]{ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
  // Rebuild must run with the cluster (and ZK) down.
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.shutdownMiniZKCluster();
  HBaseFsck fsck = new HBaseFsck(conf);
  assertTrue(fsck.rebuildMeta(false));
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.restartHBaseCluster(3);
  try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
    Admin admin = connection.getAdmin();
    // Braced (was a dangling single-statement if).
    if (admin.isTableDisabled(table)) {
      admin.enableTable(table);
    }
    LOG.info("Waiting for no more RIT");
    TEST_UTIL.waitUntilNoRegionsInTransition(60000);
    LOG.info("No more RIT in ZK, now doing final test verification");
    assertEquals(5, scanMeta());
    // Use the already-open Admin consistently instead of repeated calls to the
    // deprecated TEST_UTIL.getHBaseAdmin().
    TableName[] tableNames = admin.listTableNames();
    for (TableName tableName : tableNames) {
      HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
      assertNotNull(tableDescriptor);
      assertTrue(admin.isTableEnabled(tableName));
    }
    HTableDescriptor[] htbls = admin.listTables();
    LOG.info("Tables present after restart: " + Arrays.toString(htbls));
    assertEquals(1, htbls.length);
  }
  // Rebuilt cluster must be fsck-clean and hold the original 16 rows.
  assertErrors(doFsck(conf, false), new ERROR_CODE[]{});
  LOG.info("Table " + table + " has " + tableRowCount(conf, table) + " entries.");
  assertEquals(16, tableRowCount(conf, table));
}

Class: org.apache.hadoop.hbase.util.hbck.TestOfflineMetaRebuildOverlap

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Meta rebuild must FAIL when regions overlap: an extra region is created
 * over an existing key span, meta is wiped, and rebuildMeta() is expected to
 * refuse, reporting exactly one overlap group with three members. The
 * cluster is then restarted and the unresolved errors re-verified.
 */
@Test(timeout = 120000)
public void testMetaRebuildOverlapFail() throws Exception {
  // Inject an overlapping region spanning splits[0]..splits[2].
  byte[] startKey = splits[0];
  byte[] endKey = splits[2];
  createRegion(conf, htbl, startKey, endKey);
  wipeOutMeta();
  assertEquals(1, scanMeta());
  assertErrors(doFsck(conf, false), new ERROR_CODE[]{ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.shutdownMiniZKCluster();
  HBaseFsck fsck = new HBaseFsck(conf);
  // Rebuild must refuse because of the overlap.
  assertFalse(fsck.rebuildMeta(false));
  Multimap problems = fsck.getOverlapGroups(table);
  assertEquals(1, problems.keySet().size());
  assertEquals(3, problems.size());
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.restartHBaseCluster(3);
  LOG.info("Waiting for no more RIT");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  LOG.info("No more RIT in ZK, now doing final test verification");
  // Belt-and-suspenders wait: poll the assignment manager directly for up to 60s.
  int tries = 60;
  while (TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
      .getRegionsInTransition().size() > 0 && tries-- > 0) {
    LOG.info("Waiting for RIT: " + TEST_UTIL.getHBaseCluster().getMaster()
        .getAssignmentManager().getRegionStates().getRegionsInTransition());
    Thread.sleep(1000);
  }
  // Meta was never rebuilt, so the errors must still be present.
  assertEquals(1, scanMeta());
  HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration());
  LOG.info("Tables present after restart: " + Arrays.toString(htbls));
  assertEquals(1, htbls.length);
  assertErrors(doFsck(conf, false), new ERROR_CODE[]{ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
}

Class: org.apache.hadoop.hbase.wal.TestBoundedRegionGroupingStrategy

InternalCallVerifier EqualityVerifier 
/**
 * Runs the WAL performance evaluation with the region-group bound raised to
 * four times the configured value, restoring the original setting afterwards.
 */
@Test
public void testBoundsGreaterThanDefault() throws Exception {
  final int previousGroups = conf.getInt(NUM_REGION_GROUPS, DEFAULT_NUM_REGION_GROUPS);
  try {
    conf.setInt(NUM_REGION_GROUPS, previousGroups * 4);
    final String parallelism = Integer.toString(previousGroups * 4);
    final int exitCode = WALPerformanceEvaluation.innerMain(new Configuration(conf),
        new String[]{"-threads", parallelism, "-verify", "-noclosefs", "-iterations",
            "3000", "-regions", parallelism});
    assertEquals(0, exitCode);
  } finally {
    // Always restore the shared configuration for later tests.
    conf.setInt(NUM_REGION_GROUPS, previousGroups);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Ensure that we can use Set.add to deduplicate WALs: with the group bound
 * set to 4x, requesting WALs for twice that many random regions must hand
 * out exactly (bound) distinct WAL instances.
 */
@Test
public void setMembershipDedups() throws IOException {
  final int temp = conf.getInt(NUM_REGION_GROUPS, DEFAULT_NUM_REGION_GROUPS);
  WALFactory wals = null;
  try {
    conf.setInt(NUM_REGION_GROUPS, temp * 4);
    FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDirOnTestFS());
    wals = new WALFactory(conf, null, currentTest.getMethodName());
    final Set seen = new HashSet(temp * 4);
    final Random random = new Random();
    int count = 0;
    // Request 2x the bound; Set.add returns true only for previously-unseen WALs.
    for (int i = 0; i < temp * 8; i++) {
      final WAL maybeNewWAL = wals.getWAL(Bytes.toBytes(random.nextInt()), null);
      LOG.info("Iteration " + i + ", checking wal " + maybeNewWAL);
      if (seen.add(maybeNewWAL)) {
        count++;
      }
    }
    assertEquals("received back a different number of WALs that are not equal() to each other "
        + "than the bound we placed.", temp * 4, count);
  } finally {
    if (wals != null) {
      wals.close();
    }
    // Restore the shared configuration for later tests.
    conf.setInt(NUM_REGION_GROUPS, temp);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Same as testBoundsGreaterThanDefault but with twice as many regions as the
 * raised bound, exercising group sharing across regions.
 */
@Test
public void testMoreRegionsThanBoundWithBoundsGreaterThanDefault() throws Exception {
  final int previousGroups = conf.getInt(NUM_REGION_GROUPS, DEFAULT_NUM_REGION_GROUPS);
  try {
    conf.setInt(NUM_REGION_GROUPS, previousGroups * 4);
    final String parallelism = Integer.toString(previousGroups * 4 * 2);
    final int exitCode = WALPerformanceEvaluation.innerMain(new Configuration(conf),
        new String[]{"-threads", parallelism, "-verify", "-noclosefs", "-iterations",
            "3000", "-regions", parallelism});
    assertEquals(0, exitCode);
  } finally {
    // Always restore the shared configuration for later tests.
    conf.setInt(NUM_REGION_GROUPS, previousGroups);
  }
}

Class: org.apache.hadoop.hbase.wal.TestDefaultWALProvider

InternalCallVerifier EqualityVerifier 
/**
 * Writes to a log file from three concurrent threads and verifies that all
 * data is written (delegates to WALPerformanceEvaluation in verify mode).
 * @throws Exception on any failure
 */
@Test
public void testConcurrentWrites() throws Exception {
  final int exitCode = WALPerformanceEvaluation.innerMain(
      new Configuration(TEST_UTIL.getConfiguration()),
      new String[]{"-threads", "3", "-verify", "-noclosefs", "-iterations", "3000"});
  assertEquals(0, exitCode);
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises DefaultWALProvider.getServerNameFromWALDirectoryName: malformed or
 * unrelated paths must parse to null, while standard, sub-directory, and
 * "-splitting" WAL paths must all parse back to the originating ServerName.
 */
@Test
public void testGetServerNameFromWALDirectoryName() throws IOException {
  ServerName sn = ServerName.valueOf("hn", 450, 1398);
  String hl =
      FSUtils.getRootDir(conf) + "/" + DefaultWALProvider.getWALDirectoryName(sn.toString());
  // None of these resemble a server's WAL directory; all must yield null.
  String[] invalidPaths = {
      null,
      FSUtils.getRootDir(conf).toUri().toString(),
      "",
      " ",
      hl,
      hl + "qdf",
      "sfqf" + hl + "qdf" };
  for (String invalid : invalidPaths) {
    assertNull(DefaultWALProvider.getServerNameFromWALDirectoryName(conf, invalid));
  }
  final String wals = "/WALs/";
  ServerName parsed = DefaultWALProvider.getServerNameFromWALDirectoryName(conf,
      FSUtils.getRootDir(conf).toUri().toString() + wals + sn
          + "/localhost%2C32984%2C1343316388997.1343316390417");
  assertEquals("standard", sn, parsed);
  parsed = DefaultWALProvider.getServerNameFromWALDirectoryName(conf, hl + "/qdf");
  assertEquals("subdir", sn, parsed);
  parsed = DefaultWALProvider.getServerNameFromWALDirectoryName(conf,
      FSUtils.getRootDir(conf).toUri().toString() + wals + sn
          + "-splitting/localhost%3A57020.1340474893931");
  assertEquals("split", sn, parsed);
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests wal archiving by adding data, doing flushing/rolling and checking we archive old logs
 * and also don't archive "live logs" (that is, a log with un-flushed entries).
 *
 * This is what it does: it creates two regions, and does a series of inserts along with log
 * rolling. Whenever a WAL is rolled, previous WALs are checked for archiving. A WAL is eligible
 * for archiving if, for all the regions which have entries in that wal file, a flush has moved
 * past their maximum sequence id in that wal file.
 *
 * @throws IOException
 */
@Test
public void testWALArchiving() throws IOException {
  LOG.debug("testWALArchiving");
  HTableDescriptor table1 =
      new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
  HTableDescriptor table2 =
      new HTableDescriptor(TableName.valueOf("t2")).addFamily(new HColumnDescriptor("row"));
  final Configuration localConf = new Configuration(conf);
  localConf.set(WALFactory.WAL_PROVIDER, DefaultWALProvider.class.getName());
  final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
  try {
    final WAL wal = wals.getWAL(UNSPECIFIED_REGION, null);
    assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(wal));
    HRegionInfo hri1 = new HRegionInfo(table1.getTableName(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW);
    HRegionInfo hri2 = new HRegionInfo(table2.getTableName(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW);
    // Ensure region split status does not interfere with archiving checks.
    hri1.setSplit(false);
    hri2.setSplit(false);
    // Unflushed edits in region 1: each roll leaves one more un-archivable log.
    addEdits(wal, hri1, table1, 1);
    wal.rollWriter();
    assertEquals(1, DefaultWALProvider.getNumRolledLogFiles(wal));
    addEdits(wal, hri1, table1, 1);
    wal.rollWriter();
    assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(wal));
    // Flush region 1; on the next roll all older logs hold only flushed edits
    // and get archived.
    addEdits(wal, hri1, table1, 3);
    flushRegion(wal, hri1.getEncodedNameAsBytes(), table1.getFamiliesKeys());
    wal.rollWriter();
    assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(wal));
    addEdits(wal, hri2, table2, 1);
    wal.rollWriter();
    assertEquals(1, DefaultWALProvider.getNumRolledLogFiles(wal));
    addEdits(wal, hri1, table1, 2);
    wal.rollWriter();
    assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(wal));
    // Flushing only region 1 must not archive logs still holding region 2 edits.
    // Fix: was table2.getFamiliesKeys() while flushing hri1 -- behavior-neutral
    // (both tables declare the same single family "row") but now consistent
    // with the region being flushed.
    addEdits(wal, hri2, table2, 2);
    flushRegion(wal, hri1.getEncodedNameAsBytes(), table1.getFamiliesKeys());
    wal.rollWriter();
    assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(wal));
    // Once region 2 flushes as well, every rolled log is archivable again.
    addEdits(wal, hri2, table2, 2);
    flushRegion(wal, hri2.getEncodedNameAsBytes(), table2.getFamiliesKeys());
    wal.rollWriter();
    assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(wal));
  } finally {
    if (wals != null) {
      wals.close();
    }
  }
}


InternalCallVerifier EqualityVerifier 
/**
 * Exercises rolled-log accounting across writer rolls and cache flushes: a
 * rolled log becomes cleanable only after every region with edits in it has
 * completed a cache flush.
 * (An unused {@code AtomicLong sequenceId} local was removed.)
 */
@Test
public void testLogCleaning() throws Exception {
  LOG.info("testLogCleaning");
  final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testLogCleaning"))
      .addFamily(new HColumnDescriptor("row"));
  final HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf("testLogCleaning2"))
      .addFamily(new HColumnDescriptor("row"));
  final Configuration localConf = new Configuration(conf);
  localConf.set(WALFactory.WAL_PROVIDER, DefaultWALProvider.class.getName());
  final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
  try {
    HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW);
    HRegionInfo hri2 = new HRegionInfo(htd2.getTableName(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW);
    final WAL log = wals.getWAL(UNSPECIFIED_REGION, null);
    // Rolls after unflushed edits to region 1: each roll adds one un-cleanable log.
    addEdits(log, hri, htd, 1);
    log.rollWriter();
    assertEquals(1, DefaultWALProvider.getNumRolledLogFiles(log));
    addEdits(log, hri, htd, 2);
    log.rollWriter();
    assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(log));
    // Interleave edits from both regions before the third roll.
    addEdits(log, hri, htd, 1);
    addEdits(log, hri2, htd2, 1);
    addEdits(log, hri, htd, 1);
    addEdits(log, hri2, htd2, 1);
    log.rollWriter();
    assertEquals(3, DefaultWALProvider.getNumRolledLogFiles(log));
    // Flushing region 1 lets logs holding only its edits be cleaned; logs with
    // region 2 edits must survive.
    addEdits(log, hri2, htd2, 1);
    log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
    log.completeCacheFlush(hri.getEncodedNameAsBytes());
    log.rollWriter();
    assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(log));
    // After region 2 flushes too, every rolled log is cleanable.
    addEdits(log, hri2, htd2, 1);
    log.startCacheFlush(hri2.getEncodedNameAsBytes(), htd2.getFamiliesKeys());
    log.completeCacheFlush(hri2.getEncodedNameAsBytes());
    log.rollWriter();
    assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(log));
  } finally {
    if (wals != null) {
      wals.close();
    }
  }
}

IterativeVerifier InternalCallVerifier BooleanVerifier 
/**
 * Ensure that we can use {@code Set.add} to deduplicate WALs: the default
 * provider always hands back the same WAL instance, so only the first add
 * succeeds and every later add must report a duplicate.
 */
@Test
public void setMembershipDedups() throws IOException {
  final Configuration localConf = new Configuration(conf);
  localConf.set(WALFactory.WAL_PROVIDER, DefaultWALProvider.class.getName());
  final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
  try {
    // Was a raw HashSet; parameterized so membership checks are type-safe.
    final Set<WAL> seen = new HashSet<WAL>(1);
    final Random random = new Random();
    assertTrue("first attempt to add WAL from default provider should work.",
        seen.add(wals.getWAL(Bytes.toBytes(random.nextInt()), null)));
    for (int i = 0; i < 1000; i++) {
      assertFalse("default wal provider is only supposed to return a single wal, which should "
          + "compare as .equals itself.",
          seen.add(wals.getWAL(Bytes.toBytes(random.nextInt()), null)));
    }
  } finally {
    wals.close();
  }
}

Class: org.apache.hadoop.hbase.wal.TestSecureWAL

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes edits through an encrypted WAL, confirms the on-disk file does not
 * contain the plaintext value, then reads the WAL back and verifies every cell
 * (row, family, value) round-trips intact.
 */
@Test
public void testSecureWAL() throws Exception {
  TableName tableName = TableName.valueOf("TestSecureWAL");
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(tableName.getName()));
  HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW,
      HConstants.EMPTY_END_ROW, false);
  final int total = 10;
  final byte[] row = Bytes.toBytes("row");
  final byte[] family = Bytes.toBytes("family");
  final byte[] value = Bytes.toBytes("Test value");
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  final WALFactory wals = new WALFactory(TEST_UTIL.getConfiguration(), null, "TestSecureWAL");
  // Write the edits and sync so everything is on disk before inspection.
  final WAL wal =
      wals.getWAL(regioninfo.getEncodedNameAsBytes(), regioninfo.getTable().getNamespace());
  for (int i = 0; i < total; i++) {
    WALEdit kvs = new WALEdit();
    kvs.add(new KeyValue(row, family, Bytes.toBytes(i), value));
    wal.append(htd, regioninfo,
        new WALKey(regioninfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()),
        kvs, true);
  }
  wal.sync();
  final Path walPath = DefaultWALProvider.getCurrentFileName(wal);
  wals.shutdown();
  // The value must not appear in plaintext anywhere in the raw file bytes.
  long length = fs.getFileStatus(walPath).getLen();
  FSDataInputStream in = fs.open(walPath);
  byte[] fileData = new byte[(int) length];
  IOUtils.readFully(in, fileData);
  in.close();
  assertFalse("Cells appear to be plaintext", Bytes.contains(fileData, value));
  // The WAL must still read back (decrypt) correctly.
  WAL.Reader reader = wals.createReader(TEST_UTIL.getTestFileSystem(), walPath);
  int count = 0;
  WAL.Entry entry = new WAL.Entry();
  while (reader.next(entry) != null) {
    count++;
    // Was a raw List; parameterized for type safety.
    List<Cell> cells = entry.getEdit().getCells();
    assertTrue("Should be one KV per WALEdit", cells.size() == 1);
    for (Cell cell : cells) {
      assertTrue("Incorrect row", Bytes.equals(cell.getRowArray(), cell.getRowOffset(),
          cell.getRowLength(), row, 0, row.length));
      assertTrue("Incorrect family", Bytes.equals(cell.getFamilyArray(), cell.getFamilyOffset(),
          cell.getFamilyLength(), family, 0, family.length));
      assertTrue("Incorrect value", Bytes.equals(cell.getValueArray(), cell.getValueOffset(),
          cell.getValueLength(), value, 0, value.length));
    }
  }
  assertEquals("Should have read back as many KVs as written", total, count);
  reader.close();
}

Class: org.apache.hadoop.hbase.wal.TestWALFactory

InternalCallVerifier NullVerifier 
/** A loaded WAL coprocessor won't break existing WAL test cases. */
@Test
public void testWALCoprocessorLoaded() throws Exception {
  final WALCoprocessorHost host = wals.getWAL(UNSPECIFIED_REGION, null).getCoprocessorHost();
  final Coprocessor observer = host.findCoprocessor(SampleRegionWALObserver.class.getName());
  assertNotNull(observer);
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): the "Broken_" name prefix suggests this is a known-broken test
// kept for reference -- confirm intent before renaming or re-enabling. Left
// byte-identical below: the append/sync/reader-re-open sequencing and the
// reflective hflush()/sync() lookup (new vs. old Hadoop Syncable API) are all
// order-sensitive. It writes `total` edits three times (the last batch with
// ~1MB values), re-reading the WAL after each sync and asserting the entry
// count grows to total, then 2*total, then 3*total, both before and after
// wal.shutdown().
/** * Test new HDFS-265 sync. * @throws Exception */ @Test public void Broken_testSync() throws Exception { TableName tableName=TableName.valueOf(currentTest.getMethodName()); MultiVersionConcurrencyControl mvcc=new MultiVersionConcurrencyControl(1); Path p=new Path(dir,currentTest.getMethodName() + ".fsdos"); FSDataOutputStream out=fs.create(p); out.write(tableName.getName()); Method syncMethod=null; try { syncMethod=out.getClass().getMethod("hflush",new Class[]{}); } catch ( NoSuchMethodException e) { try { syncMethod=out.getClass().getMethod("sync",new Class[]{}); } catch ( NoSuchMethodException ex) { fail("This version of Hadoop supports neither Syncable.sync() " + "nor Syncable.hflush()."); } } syncMethod.invoke(out,new Object[]{}); FSDataInputStream in=fs.open(p); assertTrue(in.available() > 0); byte[] buffer=new byte[1024]; int read=in.read(buffer); assertEquals(tableName.getName().length,read); out.close(); in.close(); final int total=20; WAL.Reader reader=null; try { HRegionInfo info=new HRegionInfo(tableName,null,null,false); HTableDescriptor htd=new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(tableName.getName())); final WAL wal=wals.getWAL(info.getEncodedNameAsBytes(),info.getTable().getNamespace()); for (int i=0; i < total; i++) { WALEdit kvs=new WALEdit(); kvs.add(new KeyValue(Bytes.toBytes(i),tableName.getName(),tableName.getName())); wal.append(htd,info,new WALKey(info.getEncodedNameAsBytes(),tableName,System.currentTimeMillis(),mvcc),kvs,true); } wal.sync(); Path walPath=DefaultWALProvider.getCurrentFileName(wal); reader=wals.createReader(fs,walPath); int count=0; WAL.Entry entry=new WAL.Entry(); while ((entry=reader.next(entry)) != null) count++; assertEquals(total,count); reader.close(); for (int i=0; i < total; i++) { WALEdit kvs=new WALEdit(); kvs.add(new KeyValue(Bytes.toBytes(i),tableName.getName(),tableName.getName())); wal.append(htd,info,new 
WALKey(info.getEncodedNameAsBytes(),tableName,System.currentTimeMillis(),mvcc),kvs,true); } wal.sync(); reader=wals.createReader(fs,walPath); count=0; while ((entry=reader.next(entry)) != null) count++; assertTrue(count >= total); reader.close(); wal.sync(); reader=wals.createReader(fs,walPath); count=0; while ((entry=reader.next(entry)) != null) count++; assertEquals(total * 2,count); reader.close(); final byte[] value=new byte[1025 * 1024]; for (int i=0; i < total; i++) { WALEdit kvs=new WALEdit(); kvs.add(new KeyValue(Bytes.toBytes(i),tableName.getName(),value)); wal.append(htd,info,new WALKey(info.getEncodedNameAsBytes(),tableName,System.currentTimeMillis(),mvcc),kvs,true); } wal.sync(); reader=wals.createReader(fs,walPath); count=0; while ((entry=reader.next(entry)) != null) count++; assertEquals(total * 3,count); reader.close(); wal.shutdown(); reader=wals.createReader(fs,walPath); count=0; while ((entry=reader.next(entry)) != null) count++; assertEquals(total * 3,count); reader.close(); } finally { if (reader != null) reader.close(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Appends a single multi-column edit to the WAL, syncs, flushes the region's
// cache, and shuts the log down; then reads the one entry back and checks the
// encoded region name, table name, row, and per-column values all round-trip.
// Left byte-identical: the append/sync/startCacheFlush/completeCacheFlush/
// shutdown ordering is load-bearing, and the reader is closed in the finally.
/** * @throws IOException */ @Test public void testAppend() throws IOException { final int COL_COUNT=10; final HTableDescriptor htd=new HTableDescriptor(TableName.valueOf("tablename")).addFamily(new HColumnDescriptor("column")); final byte[] row=Bytes.toBytes("row"); WAL.Reader reader=null; final MultiVersionConcurrencyControl mvcc=new MultiVersionConcurrencyControl(1); try { long timestamp=System.currentTimeMillis(); WALEdit cols=new WALEdit(); for (int i=0; i < COL_COUNT; i++) { cols.add(new KeyValue(row,Bytes.toBytes("column"),Bytes.toBytes(Integer.toString(i)),timestamp,new byte[]{(byte)(i + '0')})); } HRegionInfo hri=new HRegionInfo(htd.getTableName(),HConstants.EMPTY_START_ROW,HConstants.EMPTY_END_ROW); final WAL log=wals.getWAL(hri.getEncodedNameAsBytes(),hri.getTable().getNamespace()); final long txid=log.append(htd,hri,new WALKey(hri.getEncodedNameAsBytes(),htd.getTableName(),System.currentTimeMillis(),mvcc),cols,true); log.sync(txid); log.startCacheFlush(hri.getEncodedNameAsBytes(),htd.getFamiliesKeys()); log.completeCacheFlush(hri.getEncodedNameAsBytes()); log.shutdown(); Path filename=DefaultWALProvider.getCurrentFileName(log); reader=wals.createReader(fs,filename); WAL.Entry entry=reader.next(); assertEquals(COL_COUNT,entry.getEdit().size()); int idx=0; for ( Cell val : entry.getEdit().getCells()) { assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),entry.getKey().getEncodedRegionName())); assertTrue(htd.getTableName().equals(entry.getKey().getTablename())); assertTrue(Bytes.equals(row,0,row.length,val.getRowArray(),val.getRowOffset(),val.getRowLength())); assertEquals((byte)(idx + '0'),CellUtil.cloneValue(val)[0]); System.out.println(entry.getKey() + " " + val); idx++; } } finally { if (reader != null) { reader.close(); } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Writes one multi-column edit, flushes and closes the WAL, then reads it back
// verifying the key (region, table) and the first cell's row/value. The
// `for (int i = 0; i < 1; i++)` loop intentionally reads exactly the single
// entry written (reader.next(null) returns null at EOF, breaking early).
// Left byte-identical: sync/flush/shutdown ordering matters.
/** * Tests that we can write out an edit, close, and then read it back in again. * @throws IOException */ @Test public void testEditAdd() throws IOException { final int COL_COUNT=10; final HTableDescriptor htd=new HTableDescriptor(TableName.valueOf("tablename")).addFamily(new HColumnDescriptor("column")); final byte[] row=Bytes.toBytes("row"); WAL.Reader reader=null; try { final MultiVersionConcurrencyControl mvcc=new MultiVersionConcurrencyControl(1); long timestamp=System.currentTimeMillis(); WALEdit cols=new WALEdit(); for (int i=0; i < COL_COUNT; i++) { cols.add(new KeyValue(row,Bytes.toBytes("column"),Bytes.toBytes(Integer.toString(i)),timestamp,new byte[]{(byte)(i + '0')})); } HRegionInfo info=new HRegionInfo(htd.getTableName(),row,Bytes.toBytes(Bytes.toString(row) + "1"),false); final WAL log=wals.getWAL(info.getEncodedNameAsBytes(),info.getTable().getNamespace()); final long txid=log.append(htd,info,new WALKey(info.getEncodedNameAsBytes(),htd.getTableName(),System.currentTimeMillis(),mvcc),cols,true); log.sync(txid); log.startCacheFlush(info.getEncodedNameAsBytes(),htd.getFamiliesKeys()); log.completeCacheFlush(info.getEncodedNameAsBytes()); log.shutdown(); Path filename=DefaultWALProvider.getCurrentFileName(log); reader=wals.createReader(fs,filename); for (int i=0; i < 1; i++) { WAL.Entry entry=reader.next(null); if (entry == null) break; WALKey key=entry.getKey(); WALEdit val=entry.getEdit(); assertTrue(Bytes.equals(info.getEncodedNameAsBytes(),key.getEncodedRegionName())); assertTrue(htd.getTableName().equals(key.getTablename())); Cell cell=val.getCells().get(0); assertTrue(Bytes.equals(row,0,row.length,cell.getRowArray(),cell.getRowOffset(),cell.getRowLength())); assertEquals((byte)(i + '0'),CellUtil.cloneValue(cell)[0]); System.out.println(key + " " + val); } } finally { if (reader != null) { reader.close(); } } }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Simulates a hard DFS outage under an open WAL: writes `total` edits, forces
// the NameNode into safe mode and shuts the mini DFS cluster down, restarts a
// new cluster on the same port (retrying on BindException), shortens the lease
// periods via reflection, recovers the WAL file's lease in a watchdog thread
// (60s timeout), then re-reads and counts all edits. Left byte-identical --
// the shutdown/restart/lease-recovery ordering is load-bearing.
// NOTE(review): candidates for a separate cleanup pass: `new Long(60000)` /
// `new Long(3600000)` boxing (use literals 60000L / 3600000L), and the
// IOExceptions around wal.shutdown() that are only logged via LOG.info(e).
@Test(timeout=300000) public void testAppendClose() throws Exception { TableName tableName=TableName.valueOf(currentTest.getMethodName()); HRegionInfo regioninfo=new HRegionInfo(tableName,HConstants.EMPTY_START_ROW,HConstants.EMPTY_END_ROW,false); final WAL wal=wals.getWAL(regioninfo.getEncodedNameAsBytes(),regioninfo.getTable().getNamespace()); final int total=20; HTableDescriptor htd=new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(tableName.getName())); for (int i=0; i < total; i++) { WALEdit kvs=new WALEdit(); kvs.add(new KeyValue(Bytes.toBytes(i),tableName.getName(),tableName.getName())); wal.append(htd,regioninfo,new WALKey(regioninfo.getEncodedNameAsBytes(),tableName,System.currentTimeMillis()),kvs,true); } wal.sync(); int namenodePort=cluster.getNameNodePort(); final Path walPath=DefaultWALProvider.getCurrentFileName(wal); try { DistributedFileSystem dfs=(DistributedFileSystem)cluster.getFileSystem(); dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); TEST_UTIL.shutdownMiniDFSCluster(); try { wal.shutdown(); } catch ( IOException e) { LOG.info(e); } fs.close(); LOG.info("STOPPED first instance of the cluster"); } finally { while (cluster.isClusterUp()) { LOG.error("Waiting for cluster to go down"); Thread.sleep(1000); } assertFalse(cluster.isClusterUp()); cluster=null; for (int i=0; i < 100; i++) { try { cluster=TEST_UTIL.startMiniDFSClusterForTestWAL(namenodePort); break; } catch ( BindException e) { LOG.info("Sleeping. 
BindException bringing up new cluster"); Threads.sleep(1000); } } cluster.waitActive(); fs=cluster.getFileSystem(); LOG.info("STARTED second instance."); } Method setLeasePeriod=cluster.getClass().getDeclaredMethod("setLeasePeriod",new Class[]{Long.TYPE,Long.TYPE}); setLeasePeriod.setAccessible(true); setLeasePeriod.invoke(cluster,1000L,1000L); try { Thread.sleep(1000); } catch ( InterruptedException e) { LOG.info(e); } final FileSystem recoveredFs=fs; final Configuration rlConf=conf; class RecoverLogThread extends Thread { public Exception exception=null; public void run(){ try { FSUtils.getInstance(fs,rlConf).recoverFileLease(recoveredFs,walPath,rlConf,null); } catch ( IOException e) { exception=e; } } } RecoverLogThread t=new RecoverLogThread(); t.start(); t.join(60 * 1000); if (t.isAlive()) { t.interrupt(); throw new Exception("Timed out waiting for WAL.recoverLog()"); } if (t.exception != null) throw t.exception; WAL.Reader reader=wals.createReader(fs,walPath); int count=0; WAL.Entry entry=new WAL.Entry(); while (reader.next(entry) != null) { count++; assertTrue("Should be one KeyValue per WALEdit",entry.getEdit().getCells().size() == 1); } assertEquals(total,count); reader.close(); setLeasePeriod.invoke(cluster,new Object[]{new Long(60000),new Long(3600000)}); }

IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Writes recordCount entries with the legacy SequenceFileLogWriter, then
// verifies the factory hands back a SequenceFileLogReader that reads every
// record (region name, table, row, per-column values) and returns null at EOF.
// Left byte-identical: the writer init/append/sync/close ordering matters, and
// the column families are added to the descriptor only on the first record.
/** * @throws IOException */ @Test public void testReadLegacyLog() throws IOException { final int columnCount=5; final int recordCount=5; final TableName tableName=TableName.valueOf("tablename"); final byte[] row=Bytes.toBytes("row"); long timestamp=System.currentTimeMillis(); Path path=new Path(dir,"tempwal"); SequenceFileLogWriter sflw=null; WAL.Reader reader=null; try { HRegionInfo hri=new HRegionInfo(tableName,HConstants.EMPTY_START_ROW,HConstants.EMPTY_END_ROW); HTableDescriptor htd=new HTableDescriptor(tableName); fs.mkdirs(dir); sflw=new SequenceFileLogWriter(); sflw.init(fs,path,conf,false); for (int i=0; i < recordCount; ++i) { WALKey key=new HLogKey(hri.getEncodedNameAsBytes(),tableName,i,timestamp,HConstants.DEFAULT_CLUSTER_ID); WALEdit edit=new WALEdit(); for (int j=0; j < columnCount; ++j) { if (i == 0) { htd.addFamily(new HColumnDescriptor("column" + j)); } String value=i + "" + j; edit.add(new KeyValue(row,row,row,timestamp,Bytes.toBytes(value))); } sflw.append(new WAL.Entry(key,edit)); } sflw.sync(); sflw.close(); reader=wals.createReader(fs,path); assertTrue(reader instanceof SequenceFileLogReader); for (int i=0; i < recordCount; ++i) { WAL.Entry entry=reader.next(); assertNotNull(entry); assertEquals(columnCount,entry.getEdit().size()); assertArrayEquals(hri.getEncodedNameAsBytes(),entry.getKey().getEncodedRegionName()); assertEquals(tableName,entry.getKey().getTablename()); int idx=0; for ( Cell val : entry.getEdit().getCells()) { assertTrue(Bytes.equals(row,0,row.length,val.getRowArray(),val.getRowOffset(),val.getRowLength())); String value=i + "" + idx; assertArrayEquals(Bytes.toBytes(value),CellUtil.cloneValue(val)); idx++; } } WAL.Entry entry=reader.next(); assertNull(entry); } finally { if (sflw != null) { sflw.close(); } if (reader != null) { reader.close(); } } }

Class: org.apache.hadoop.hbase.wal.TestWALFiltering

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
// NOTE(review): allFlushedSequenceIds is created here but never populated, so
// containsKey() is always false and the assertEquals inside the loop can never
// execute -- as written, the test only proves flushAllRegions() and the
// GetLastFlushedSequenceId RPC plumbing do not throw. Left unchanged pending a
// decision on where the expected sequence ids should come from. The raw
// SortedMap/TreeMap could also be parameterized to <byte[], Long>.
@Test public void testFlushedSequenceIdsSentToHMaster() throws IOException, InterruptedException, ServiceException { SortedMap allFlushedSequenceIds=new TreeMap(Bytes.BYTES_COMPARATOR); for (int i=0; i < NUM_RS; ++i) { flushAllRegions(i); } Thread.sleep(10000); HMaster master=TEST_UTIL.getMiniHBaseCluster().getMaster(); for (int i=0; i < NUM_RS; ++i) { for ( byte[] regionName : getRegionsByServer(i)) { if (allFlushedSequenceIds.containsKey(regionName)) { GetLastFlushedSequenceIdRequest req=RequestConverter.buildGetLastFlushedSequenceIdRequest(regionName); assertEquals((long)allFlushedSequenceIds.get(regionName),master.getMasterRpcServices().getLastFlushedSequenceId(null,req).getLastFlushedSequenceId()); } } } }

Class: org.apache.hadoop.hbase.wal.TestWALMethods

InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Drives EntryBuffers through its buffering protocol: append 1000 entries,
// take a chunk (whose heapSize must match the buffered total), confirm the
// region is marked as currently being written so a second chunk for the same
// region is withheld (getChunkToWrite returns null), then doneWriting the
// first chunk, take the second distinct chunk, and drain to zero buffered.
// Left byte-identical -- the appendEntry/getChunkToWrite/doneWriting ordering
// and the totalBuffered accounting are exactly the behavior under test.
@Test public void testEntrySink() throws Exception { Configuration conf=new Configuration(); RecoveryMode mode=(conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY,false) ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING); EntryBuffers sink=new EntryBuffers(new PipelineController(),1 * 1024 * 1024); for (int i=0; i < 1000; i++) { WAL.Entry entry=createTestLogEntry(i); sink.appendEntry(entry); } assertTrue(sink.totalBuffered > 0); long amountInChunk=sink.totalBuffered; RegionEntryBuffer chunk=sink.getChunkToWrite(); assertEquals(chunk.heapSize(),amountInChunk); assertTrue(sink.isRegionCurrentlyWriting(TEST_REGION)); for (int i=0; i < 500; i++) { WAL.Entry entry=createTestLogEntry(i); sink.appendEntry(entry); } assertNull(sink.getChunkToWrite()); sink.doneWriting(chunk); RegionEntryBuffer chunk2=sink.getChunkToWrite(); assertNotNull(chunk2); assertNotSame(chunk,chunk2); long amountInChunk2=sink.totalBuffered; assertTrue(amountInChunk2 < amountInChunk); sink.doneWriting(chunk2); assertEquals(0,sink.totalBuffered); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Creates recovered-edits files with boundary names (-1 and Long.MAX_VALUE),
// ordinary ids in shuffled creation order, and one moved-aside file
// ("<MAX_VALUE>.<timestamp>") that must be skipped; asserts that
// getSplitEditFilesSorted returns exactly the 7 real files in ascending
// sequence-id order. A throwaway WAL is created first so the region dir
// layout exists. Left byte-identical.
/** * Assert that getSplitEditFilesSorted returns files in expected order and * that it skips moved-aside files. * @throws IOException */ @Test public void testGetSplitEditFilesSorted() throws IOException { FileSystem fs=FileSystem.get(util.getConfiguration()); Path regiondir=util.getDataTestDir("regiondir"); fs.delete(regiondir,true); fs.mkdirs(regiondir); Path recoverededits=WALSplitter.getRegionDirRecoveredEditsDir(regiondir); String first=WALSplitter.formatRecoveredEditsFileName(-1); createFile(fs,recoverededits,first); createFile(fs,recoverededits,WALSplitter.formatRecoveredEditsFileName(0)); createFile(fs,recoverededits,WALSplitter.formatRecoveredEditsFileName(1)); createFile(fs,recoverededits,WALSplitter.formatRecoveredEditsFileName(11)); createFile(fs,recoverededits,WALSplitter.formatRecoveredEditsFileName(2)); createFile(fs,recoverededits,WALSplitter.formatRecoveredEditsFileName(50)); String last=WALSplitter.formatRecoveredEditsFileName(Long.MAX_VALUE); createFile(fs,recoverededits,last); createFile(fs,recoverededits,Long.toString(Long.MAX_VALUE) + "." + System.currentTimeMillis()); final Configuration walConf=new Configuration(util.getConfiguration()); FSUtils.setRootDir(walConf,regiondir); (new WALFactory(walConf,null,"dummyLogName")).getWAL(new byte[]{},null); NavigableSet files=WALSplitter.getSplitEditFilesSorted(fs,regiondir); assertEquals(7,files.size()); assertEquals(files.pollFirst().getName(),first); assertEquals(files.pollLast().getName(),last); assertEquals(files.pollFirst().getName(),WALSplitter.formatRecoveredEditsFileName(0)); assertEquals(files.pollFirst().getName(),WALSplitter.formatRecoveredEditsFileName(1)); assertEquals(files.pollFirst().getName(),WALSplitter.formatRecoveredEditsFileName(2)); assertEquals(files.pollFirst().getName(),WALSplitter.formatRecoveredEditsFileName(11)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A fresh RegionEntryBuffer reports zero heap size; appending an entry grows it. */
@Test
public void testRegionEntryBuffer() throws Exception {
  final WALSplitter.RegionEntryBuffer buffer =
      new WALSplitter.RegionEntryBuffer(TEST_TABLE, TEST_REGION);
  assertEquals(0, buffer.heapSize());
  buffer.appendEntry(createTestLogEntry(1));
  assertTrue(buffer.heapSize() > 0);
}

Class: org.apache.hadoop.hbase.wal.TestWALReaderOnSecureWAL

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A secure (crypto-capable) reader must still read an unencrypted WAL, and the
 * splitter must process it without sidelining anything as corrupt.
 */
@Test()
public void testSecureWALReaderOnWAL() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass("hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class,
      WAL.Reader.class);
  conf.setClass("hbase.regionserver.hlog.writer.impl", ProtobufLogWriter.class,
      WALProvider.Writer.class);
  conf.setBoolean(WAL_ENCRYPTION, false);
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  final WALFactory wals = new WALFactory(conf, null, currentTest.getMethodName());
  Path walPath = writeWAL(wals, currentTest.getMethodName());
  // The WAL was written unencrypted, so the raw bytes must contain the value.
  long length = fs.getFileStatus(walPath).getLen();
  FSDataInputStream in = fs.open(walPath);
  byte[] fileData = new byte[(int) length];
  IOUtils.readFully(in, fileData);
  in.close();
  assertTrue("Cells should be plaintext", Bytes.contains(fileData, value));
  // The secure reader must handle the plaintext WAL without error.
  try {
    WAL.Reader reader = wals.createReader(TEST_UTIL.getTestFileSystem(), walPath);
    reader.close();
  } catch (IOException ioe) {
    // Was assertFalse(true): report the actual cause instead of a bare failure.
    fail("Secure reader should be able to read a plaintext WAL: " + ioe);
  }
  FileStatus[] listStatus = fs.listStatus(walPath.getParent());
  RecoveryMode mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false)
      ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
  Path rootdir = FSUtils.getRootDir(conf);
  try {
    WALSplitter s = new WALSplitter(wals, conf, rootdir, fs, null, null, mode);
    s.splitLogFile(listStatus[0], null);
    // Nothing should have been sidelined as corrupt.
    Path file = new Path(ZKSplitLog.getSplitLogDir(rootdir, listStatus[0].getPath().getName()),
        "corrupt");
    assertTrue(!fs.exists(file));
  } catch (IOException ioe) {
    // Was assertTrue("...", false): fail() states the intent directly.
    fail("WAL should have been processed");
  }
  wals.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * A plaintext (non-secure) reader must fail on an encrypted WAL, and the
 * splitter must sideline the unreadable file as corrupt rather than crash.
 */
@Test()
public void testWALReaderOnSecureWAL() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
      WAL.Reader.class);
  conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class,
      WALProvider.Writer.class);
  conf.setBoolean(WAL_ENCRYPTION, true);
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  final WALFactory wals = new WALFactory(conf, null, currentTest.getMethodName());
  Path walPath = writeWAL(wals, currentTest.getMethodName());
  // Encrypted WAL: the raw file must not contain the plaintext value.
  long length = fs.getFileStatus(walPath).getLen();
  FSDataInputStream in = fs.open(walPath);
  byte[] fileData = new byte[(int) length];
  IOUtils.readFully(in, fileData);
  in.close();
  assertFalse("Cells appear to be plaintext", Bytes.contains(fileData, value));
  try {
    wals.createReader(TEST_UTIL.getTestFileSystem(), walPath);
    // Was assertFalse(true): state the expectation explicitly.
    fail("Expected an IOException: a plaintext reader must not open an encrypted WAL");
  } catch (IOException ioe) {
    // Expected: the non-secure reader cannot decrypt the WAL.
  }
  FileStatus[] listStatus = fs.listStatus(walPath.getParent());
  RecoveryMode mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false)
      ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
  Path rootdir = FSUtils.getRootDir(conf);
  try {
    WALSplitter s = new WALSplitter(wals, conf, rootdir, fs, null, null, mode);
    s.splitLogFile(listStatus[0], null);
    // The splitter must have sidelined the unreadable WAL as "corrupt".
    Path file = new Path(ZKSplitLog.getSplitLogDir(rootdir, listStatus[0].getPath().getName()),
        "corrupt");
    assertTrue(fs.exists(file));
  } catch (IOException ioe) {
    // Was assertTrue("...", false): fail() states the intent directly.
    fail("WAL should have been sidelined");
  }
  wals.close();
}

Class: org.apache.hadoop.hbase.zookeeper.TestHQuorumPeer

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** The quorum string fills in the configured client port only for hosts that lack one. */
@Test
public void testGetZKQuorumServersString() {
  final Configuration config = new Configuration(TEST_UTIL.getConfiguration());
  config.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 8888);
  config.set(HConstants.ZOOKEEPER_QUORUM, "foo:1234,bar:5678,baz,qux:9012");
  final String quorum = ZKConfig.getZKQuorumServersString(config);
  assertEquals("foo:1234,bar:5678,baz:8888,qux:9012", quorum);
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** With a fully cleared configuration, makeZKProps must fall back to client port 2181. */
@Test
public void testShouldAssignDefaultZookeeperClientPort() {
  final Configuration config = HBaseConfiguration.create();
  config.clear();
  final Properties props = ZKConfig.makeZKProps(config);
  assertNotNull(props);
  assertEquals(2181, props.get("clientPort"));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * makeZKProps maps the configured data dir, client port and quorum hosts into
 * ZooKeeper server properties, numbering servers and assigning peer ports.
 */
@Test
public void testMakeZKProps() {
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  conf.set(HConstants.ZOOKEEPER_DATA_DIR, this.dataDir.toString());
  Properties properties = ZKConfig.makeZKProps(conf);
  assertEquals(dataDir.toString(), (String) properties.get("dataDir"));
  assertEquals(Integer.valueOf(PORT_NO), Integer.valueOf(properties.getProperty("clientPort")));
  assertEquals("localhost:2888:3888", properties.get("server.0"));
  // Was assertEquals(null, ...): assertNull states the intent directly.
  assertNull(properties.get("server.1"));
  String oldValue = conf.get(HConstants.ZOOKEEPER_QUORUM);
  conf.set(HConstants.ZOOKEEPER_QUORUM, "a.foo.bar,b.foo.bar,c.foo.bar");
  properties = ZKConfig.makeZKProps(conf);
  assertEquals(dataDir.toString(), properties.get("dataDir"));
  assertEquals(Integer.valueOf(PORT_NO), Integer.valueOf(properties.getProperty("clientPort")));
  assertEquals("a.foo.bar:2888:3888", properties.get("server.0"));
  assertEquals("b.foo.bar:2888:3888", properties.get("server.1"));
  assertEquals("c.foo.bar:2888:3888", properties.get("server.2"));
  assertNull(properties.get("server.3"));
  // Restore the quorum so later tests see the original configuration.
  conf.set(HConstants.ZOOKEEPER_QUORUM, oldValue);
}

Class: org.apache.hadoop.hbase.zookeeper.TestRecoverableZooKeeper

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Forces a failure inside RecoverableZooKeeper's retry loop by swapping in a
 * stub ZooKeeper handle that throws on its first operation, then verifies the
 * retried setData still lands and reads back correctly.
 */
@Test
public void testSetDataVersionMismatchInLoop() throws Exception {
  String znode = "/hbase/splitWAL/9af7cfc9b15910a0b3d714bf40a3248f";
  Configuration conf = TEST_UTIL.getConfiguration();
  ZooKeeperWatcher zkw =
      new ZooKeeperWatcher(conf, "testSetDataVersionMismatchInLoop", abortable, true);
  String ensemble = ZKConfig.getZKQuorumServersString(conf);
  RecoverableZooKeeper rzk = ZKUtil.connect(conf, ensemble, zkw);
  rzk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  // Bytes.toBytes instead of String.getBytes(): avoids the platform-default
  // charset and matches the Bytes usage elsewhere in this test.
  rzk.setData(znode, Bytes.toBytes("OPENING"), 0);
  // Replace the internal ZooKeeper handle with a stub that throws on the first
  // operation, so the subsequent setData exercises the retry path.
  Field zkField = RecoverableZooKeeper.class.getDeclaredField("zk");
  zkField.setAccessible(true);
  int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
  ZookeeperStub zkStub = new ZookeeperStub(ensemble, timeout, zkw);
  zkStub.setThrowExceptionInNumOperations(1);
  zkField.set(rzk, zkStub);
  byte[] opened = Bytes.toBytes("OPENED");
  rzk.setData(znode, opened, 1);
  byte[] data = rzk.getData(znode, false, new Stat());
  assertTrue(Bytes.equals(opened, data));
}

Class: org.apache.hadoop.hbase.zookeeper.TestZKConfig

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * The cluster key built by ZKConfig.getZooKeeperClusterKey must strip
 * whitespace (tabs/newlines) from the quorum and take the form
 * "quorum:port:parent,name".
 */
@Test
public void testGetZooKeeperClusterKey() {
  final Configuration conf = HBaseConfiguration.create();
  // Deliberately embed whitespace the key builder must strip out.
  conf.set(HConstants.ZOOKEEPER_QUORUM, "\tlocalhost\n");
  conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "3333");
  conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "hbase");
  final String clusterKey = ZKConfig.getZooKeeperClusterKey(conf, "test");
  final boolean containsWhitespace = clusterKey.contains("\t") || clusterKey.contains("\n");
  assertTrue(!containsWhitespace);
  assertEquals("localhost:3333:hbase,test", clusterKey);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * makeZKProps must carry the client port set on the HBase configuration
 * through to the generated ZooKeeper properties.
 */
@Test
public void testZKConfigLoading() throws Exception {
  final Configuration hbaseConf = HBaseConfiguration.create();
  hbaseConf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);
  final Properties zkProps = ZKConfig.makeZKProps(hbaseConf);
  final String clientPort = zkProps.getProperty("clientPort");
  assertEquals("Property client port should have been default from the HBase config",
      "2181", clientPort);
}

Class: org.apache.hadoop.hbase.zookeeper.TestZKLeaderManager

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises leader election: a leader must always exist, the leader znode
 * must record that leader's index, and a new leader must be elected after
 * the current one abdicates or is stopped.
 */
@Test
public void testLeaderSelection() throws Exception {
  MockLeader currentLeader = getCurrentLeader();
  assertNotNull("Leader should exist", currentLeader);
  assertLeaderZNodeMatches(currentLeader);

  // Abdication should surrender leadership and trigger a fresh election.
  currentLeader.abdicate();
  assertFalse(currentLeader.isMaster());
  currentLeader = getCurrentLeader();
  assertNotNull("New leader should exist after abdication", currentLeader);
  assertLeaderZNodeMatches(currentLeader);

  // Stopping the leader outright should likewise yield a new leader.
  currentLeader.stop("Stopping for test");
  assertFalse(currentLeader.isMaster());
  currentLeader = getCurrentLeader();
  assertNotNull("New leader should exist after stop", currentLeader);
  assertLeaderZNodeMatches(currentLeader);

  // One more stop/re-election round.
  currentLeader.stop("Stopping for test");
  assertFalse(currentLeader.isMaster());
  currentLeader = getCurrentLeader();
  assertNotNull("New leader should exist", currentLeader);
}

/**
 * Asserts that the leader znode is non-empty and stores the given leader's
 * index. Extracted from testLeaderSelection, where this sequence was
 * duplicated three times verbatim.
 */
private void assertLeaderZNodeMatches(MockLeader leader) throws Exception {
  LOG.debug("Current leader index is " + leader.getIndex());
  byte[] znodeData = ZKUtil.getData(leader.getWatcher(), LEADER_ZNODE);
  assertNotNull("Leader znode should contain leader index", znodeData);
  assertTrue("Leader znode should not be empty", znodeData.length > 0);
  int storedIndex = Bytes.toInt(znodeData);
  LOG.debug("Stored leader index in ZK is " + storedIndex);
  assertEquals("Leader znode should match leader index", leader.getIndex(), storedIndex);
}

Class: org.apache.hadoop.hbase.zookeeper.TestZKMulti

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Verifies that for the given root node, it should delete all the child nodes
 * recursively using multi-update api.
 */
@Test(timeout = 60000)
public void testdeleteChildrenRecursivelyMulti() throws Exception {
  final String parentZNode = "/testRootMulti";
  createZNodeTree(parentZNode);

  ZKUtil.deleteChildrenRecursivelyMultiOrSequential(zkw, true, parentZNode);

  // The parent itself must survive; only its subtree is removed.
  assertTrue("Wrongly deleted parent znode!", ZKUtil.checkExists(zkw, parentZNode) > -1);
  final List remaining = zkw.getRecoverableZooKeeper().getChildren(parentZNode, false);
  assertTrue("Failed to delete child znodes!", 0 == remaining.size());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Deletes the children of several parent znodes in a single
 * multi-or-sequential call and verifies that each parent survives with an
 * empty subtree.
 */
@Test(timeout = 60000)
public void testDeleteChildrenRecursivelyMultiOrSequential() throws Exception {
  String parentZNode1 = "/testdeleteChildren1";
  String parentZNode2 = "/testdeleteChildren2";
  String parentZNode3 = "/testdeleteChildren3";
  createZNodeTree(parentZNode1);
  createZNodeTree(parentZNode2);
  createZNodeTree(parentZNode3);

  ZKUtil.deleteChildrenRecursivelyMultiOrSequential(zkw, true,
      parentZNode1, parentZNode2, parentZNode3);

  assertTrue("Wrongly deleted parent znode 1!", ZKUtil.checkExists(zkw, parentZNode1) > -1);
  List children = zkw.getRecoverableZooKeeper().getChildren(parentZNode1, false);
  assertTrue("Failed to delete child znodes of parent znode 1!", 0 == children.size());

  assertTrue("Wrongly deleted parent znode 2!", ZKUtil.checkExists(zkw, parentZNode2) > -1);
  children = zkw.getRecoverableZooKeeper().getChildren(parentZNode2, false);
  // Fixed copy-paste: these messages previously all referred to "parent znode 1".
  assertTrue("Failed to delete child znodes of parent znode 2!", 0 == children.size());

  assertTrue("Wrongly deleted parent znode 3!", ZKUtil.checkExists(zkw, parentZNode3) > -1);
  children = zkw.getRecoverableZooKeeper().getChildren(parentZNode3, false);
  assertTrue("Failed to delete child znodes of parent znode 3!", 0 == children.size());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier 
/**
 * Verifies that for the given root node, it should delete all the child nodes
 * recursively using normal sequential way.
 */
@Test(timeout = 60000)
public void testdeleteChildrenRecursivelySequential() throws Exception {
  final String parentZNode = "/testRootSeq";
  createZNodeTree(parentZNode);
  // Force the sequential (non-multi) code path, remembering the prior setting.
  final boolean priorUseMulti =
      zkw.getConfiguration().getBoolean("hbase.zookeeper.useMulti", false);
  zkw.getConfiguration().setBoolean("hbase.zookeeper.useMulti", false);
  try {
    ZKUtil.deleteChildrenRecursivelyMultiOrSequential(zkw, true, parentZNode);
    // The parent itself must remain; only its children are removed.
    assertTrue("Wrongly deleted parent znode!", ZKUtil.checkExists(zkw, parentZNode) > -1);
    final List remaining = zkw.getRecoverableZooKeeper().getChildren(parentZNode, false);
    assertTrue("Failed to delete child znodes!", 0 == remaining.size());
  } finally {
    zkw.getConfiguration().setBoolean("hbase.zookeeper.useMulti", priorUseMulti);
  }
}

InternalCallVerifier BooleanVerifier 
/**
 * Verifies that for the given root node, it should delete all the nodes recursively using
 * normal sequential way.
 */
@Test(timeout = 60000)
public void testDeleteNodeRecursivelySequential() throws Exception {
  final String parentZNode = "/testdeleteNodeRecursivelySequential";
  createZNodeTree(parentZNode);
  // Disable multi so the sequential deletion path is exercised.
  final boolean priorUseMulti =
      zkw.getConfiguration().getBoolean("hbase.zookeeper.useMulti", false);
  zkw.getConfiguration().setBoolean("hbase.zookeeper.useMulti", false);
  try {
    ZKUtil.deleteNodeRecursively(zkw, parentZNode);
    // Unlike the children-only variant, the parent node must be gone too.
    assertTrue("Parent znode should be deleted.", ZKUtil.checkExists(zkw, parentZNode) == -1);
  } finally {
    zkw.getConfiguration().setBoolean("hbase.zookeeper.useMulti", priorUseMulti);
  }
}

Class: org.apache.hadoop.hbase.zookeeper.TestZKUtil

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * createACL must produce one ALL-permission "auth" ACL per superuser and skip
 * group principals (names starting with '@'). The expected size is 4: the
 * three user entries plus one more entry (presumably the server's own ACL --
 * confirm against ZKUtil.createACL).
 */
@Test
public void testCreateACL() throws ZooKeeperConnectionException, IOException {
  Configuration conf = HBaseConfiguration.create();
  conf.set(Superusers.SUPERUSER_CONF_KEY, "user1,@group1,user2,@group2,user3");
  String node = "/hbase/testCreateACL";
  ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false);
  List aclList = ZKUtil.createACL(watcher, node, true);
  // JUnit's assertEquals takes (expected, actual); the original had them
  // swapped, which garbles the failure message.
  Assert.assertEquals(4, aclList.size());
  // Group principals cannot be expressed with the "auth" scheme and must be
  // excluded. Split assertions so a failure pinpoints the offending entry.
  Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("auth", "@group1"))));
  Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("auth", "@group2"))));
  Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("auth", "user1"))));
  Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("auth", "user2"))));
  Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("auth", "user3"))));
}

Class: org.apache.hadoop.hbase.zookeeper.TestZooKeeperACL

IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When authentication is enabled on Zookeeper, /hbase/master should be
 * created with 2 ACLs: one specifies that the hbase user has full access
 * to the node; the other, that it is world-readable.
 */
@Test(timeout = 30000)
public void testHBaseMasterServerZNodeACL() throws Exception {
  if (!secureZKAvailable) {
    return;
  }
  List acls = zkw.getRecoverableZooKeeper().getZooKeeper().getACL("/hbase/master", new Stat());
  assertEquals(2, acls.size());
  boolean foundWorldReadableAcl = false;
  boolean foundHBaseOwnerAcl = false;
  for (int i = 0; i < 2; i++) {
    // Bug fix: assert on the entry the loop matched (index i). The original
    // asserted on hard-coded indices 0/1 regardless of which entry matched,
    // so it only passed when the ACLs happened to arrive in that order.
    // Also fixed: assertEquals takes (expected, actual).
    if (acls.get(i).getId().getScheme().equals("world")) {
      assertEquals("anyone", acls.get(i).getId().getId());
      assertEquals(ZooDefs.Perms.READ, acls.get(i).getPerms());
      foundWorldReadableAcl = true;
    } else if (acls.get(i).getId().getScheme().equals("sasl")) {
      assertEquals("hbase", acls.get(i).getId().getId());
      foundHBaseOwnerAcl = true;
    } else {
      assertTrue("Unexpected ACL scheme: " + acls.get(i).getId().getScheme(), false);
    }
  }
  assertTrue(foundWorldReadableAcl);
  assertTrue(foundHBaseOwnerAcl);
}

InternalCallVerifier EqualityVerifier 
/**
 * Create a node and check its ACL. When authentication is enabled on
 * Zookeeper, all nodes (except /hbase/root-region-server, /hbase/master
 * and /hbase/hbaseid) should be created so that only the hbase server user
 * (master or region server user) that created them can access them, and
 * this user should have all permissions on this node. For
 * /hbase/root-region-server, /hbase/master, and /hbase/hbaseid the
 * permissions should be as above, but should also be world-readable. First
 * we check the general case of /hbase nodes in the following test, and
 * then check the subset of world-readable nodes in the three tests after
 * that.
 */
@Test(timeout = 30000)
public void testHBaseRootZNodeACL() throws Exception {
  if (!secureZKAvailable) {
    return;
  }
  List acls = zkw.getRecoverableZooKeeper().getZooKeeper().getACL("/hbase", new Stat());
  // Fixed: assertEquals takes (expected, actual); the original had the
  // arguments swapped, producing misleading failure messages.
  assertEquals(1, acls.size());
  assertEquals("sasl", acls.get(0).getId().getScheme());
  assertEquals("hbase", acls.get(0).getId().getId());
  assertEquals(ZooDefs.Perms.ALL, acls.get(0).getPerms());
}

InternalCallVerifier EqualityVerifier 
/**
 * Check if ZooKeeper JaasConfiguration is valid: the detected security state
 * must match secureZKAvailable, and pointing the JAAS login config at an
 * empty file must yield "not secure".
 */
@Test
public void testIsZooKeeperSecure() throws Exception {
  boolean testJaasConfig =
      ZKUtil.isSecureZooKeeper(new Configuration(TEST_UTIL.getConfiguration()));
  assertEquals(secureZKAvailable, testJaasConfig);
  File saslConfFile = File.createTempFile("tmp", "fakeJaas.conf");
  String oldConfig = System.getProperty("java.security.auth.login.config");
  try {
    FileWriter fwriter = new FileWriter(saslConfFile);
    fwriter.write("");
    fwriter.close();
    System.setProperty("java.security.auth.login.config", saslConfFile.getAbsolutePath());
    testJaasConfig = ZKUtil.isSecureZooKeeper(new Configuration(TEST_UTIL.getConfiguration()));
    assertFalse(testJaasConfig);
  } finally {
    // Restore global state even if an assertion fails: the original left the
    // fake JAAS config in the system property and the temp file undeleted on
    // failure, which could poison later tests in the same JVM.
    if (oldConfig == null) {
      System.clearProperty("java.security.auth.login.config");
    } else {
      System.setProperty("java.security.auth.login.config", oldConfig);
    }
    saslConfFile.delete();
  }
}

IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When authentication is enabled on Zookeeper, /hbase/hbaseid should be
 * created with 2 ACLs: one specifies that the hbase user has full access
 * to the node; the other, that it is world-readable.
 */
@Test(timeout = 30000)
public void testHBaseIDZNodeACL() throws Exception {
  if (!secureZKAvailable) {
    return;
  }
  List acls = zkw.getRecoverableZooKeeper().getZooKeeper().getACL("/hbase/hbaseid", new Stat());
  assertEquals(2, acls.size());
  boolean foundWorldReadableAcl = false;
  boolean foundHBaseOwnerAcl = false;
  for (int i = 0; i < 2; i++) {
    // Bug fix: assert on the matched entry (index i), not on hard-coded
    // indices 0/1 as the original did; also use (expected, actual) order.
    if (acls.get(i).getId().getScheme().equals("world")) {
      assertEquals("anyone", acls.get(i).getId().getId());
      assertEquals(ZooDefs.Perms.READ, acls.get(i).getPerms());
      foundWorldReadableAcl = true;
    } else if (acls.get(i).getId().getScheme().equals("sasl")) {
      assertEquals("hbase", acls.get(i).getId().getId());
      foundHBaseOwnerAcl = true;
    } else {
      assertTrue("Unexpected ACL scheme: " + acls.get(i).getId().getScheme(), false);
    }
  }
  assertTrue(foundWorldReadableAcl);
  assertTrue(foundHBaseOwnerAcl);
}

IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When authentication is enabled on Zookeeper, /hbase/root-region-server
 * should be created with 2 ACLs: one specifies that the hbase user has
 * full access to the node; the other, that it is world-readable.
 */
@Test(timeout = 30000)
public void testHBaseRootRegionServerZNodeACL() throws Exception {
  if (!secureZKAvailable) {
    return;
  }
  List acls =
      zkw.getRecoverableZooKeeper().getZooKeeper().getACL("/hbase/root-region-server", new Stat());
  assertEquals(2, acls.size());
  boolean foundWorldReadableAcl = false;
  boolean foundHBaseOwnerAcl = false;
  for (int i = 0; i < 2; i++) {
    // Bug fix: assert on the matched entry (index i), not on hard-coded
    // indices 0/1 as the original did; also use (expected, actual) order.
    if (acls.get(i).getId().getScheme().equals("world")) {
      assertEquals("anyone", acls.get(i).getId().getId());
      assertEquals(ZooDefs.Perms.READ, acls.get(i).getPerms());
      foundWorldReadableAcl = true;
    } else if (acls.get(i).getId().getScheme().equals("sasl")) {
      assertEquals("hbase", acls.get(i).getId().getId());
      foundHBaseOwnerAcl = true;
    } else {
      assertTrue("Unexpected ACL scheme: " + acls.get(i).getId().getScheme(), false);
    }
  }
  assertTrue(foundWorldReadableAcl);
  assertTrue(foundHBaseOwnerAcl);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Check if Programmatic way of setting zookeeper security settings is valid:
 * only after kerberos authentication AND both client and server
 * keytab/principal settings are supplied is ZooKeeper considered secure.
 */
@Test
public void testIsZooKeeperSecureWithProgrammaticConfig() throws Exception {
  javax.security.auth.login.Configuration.setConfiguration(new DummySecurityConfiguration());
  Configuration config = new Configuration(HBaseConfiguration.create());
  // No authentication configured: not secure.
  // (assertFalse/assertTrue replace assertEquals calls that had their
  // expected/actual arguments swapped.)
  assertFalse(ZKUtil.isSecureZooKeeper(config));
  // Kerberos alone, without keytabs/principals, is still not secure.
  config.set("hbase.security.authentication", "kerberos");
  assertFalse(ZKUtil.isSecureZooKeeper(config));
  // With client and server keytab/principal set, it becomes secure.
  config.set(HConstants.ZK_CLIENT_KEYTAB_FILE, "/dummy/file");
  config.set(HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL, "dummy");
  config.set(HConstants.ZK_SERVER_KEYTAB_FILE, "/dummy/file");
  config.set(HConstants.ZK_SERVER_KERBEROS_PRINCIPAL, "dummy");
  assertTrue(ZKUtil.isSecureZooKeeper(config));
}

InternalCallVerifier EqualityVerifier 
/**
 * Finally, we check the ACLs of a node outside of the /hbase hierarchy and
 * verify that its ACL is simply 'hbase:Perms.ALL'.
 */
@Test
public void testOutsideHBaseNodeACL() throws Exception {
  if (!secureZKAvailable) {
    return;
  }
  ZKUtil.createWithParents(zkw, "/testACLNode");
  List acls = zkw.getRecoverableZooKeeper().getZooKeeper().getACL("/testACLNode", new Stat());
  // Fixed: assertEquals takes (expected, actual); the original had the
  // arguments swapped, producing misleading failure messages.
  assertEquals(1, acls.size());
  assertEquals("sasl", acls.get(0).getId().getScheme());
  assertEquals("hbase", acls.get(0).getId().getId());
  assertEquals(ZooDefs.Perms.ALL, acls.get(0).getPerms());
}

Class: org.apache.hadoop.hbase.zookeeper.TestZooKeeperMainServer

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * We need delete of a znode to work at least.
 * @throws Exception
 */
@Test
public void testCommandLineWorks() throws Exception {
  // NoExitSecurityManager converts System.exit into a catchable ExitException
  // so the main() under test cannot kill the JVM running the tests.
  System.setSecurityManager(new NoExitSecurityManager());
  HBaseTestingUtility htu = new HBaseTestingUtility();
  htu.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
  htu.startMiniZKCluster();
  try {
    ZooKeeperWatcher zkw = htu.getZooKeeperWatcher();
    String znode = "/testCommandLineWorks";
    ZKUtil.createWithParents(zkw, znode, HConstants.EMPTY_BYTE_ARRAY);
    ZKUtil.checkExists(zkw, znode);
    boolean exception = false;
    try {
      // Run the CLI "delete" command against the mini cluster; main() is
      // expected to call System.exit, which surfaces as ExitException here.
      ZooKeeperMainServer.main(new String[]{"-server",
          "localhost:" + htu.getZkCluster().getClientPort(), "delete", znode});
    } catch (ExitException ee) {
      // Expected path: the command ran and then tried to exit.
      exception = true;
    }
    assertTrue(exception);
    // checkExists returning -1 means the znode is gone, i.e. delete worked.
    assertEquals(-1, ZKUtil.checkExists(zkw, znode));
  } finally {
    // Always undo global state: stop the mini cluster and restore the
    // default SecurityManager.
    htu.shutdownMiniZKCluster();
    System.setSecurityManager(null);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * ZooKeeperMainServer.parse must build a host:port ensemble string from the
 * configuration, applying the configured client port to any quorum member
 * that does not carry its own port.
 */
@Test
public void testHostPortParse() {
  ZooKeeperMainServer parser = new ZooKeeperMainServer();
  Configuration c = HBaseConfiguration.create();
  // Default quorum: single localhost entry with the configured client port.
  assertEquals("localhost:" + c.get(HConstants.ZOOKEEPER_CLIENT_PORT), parser.parse(c));
  final String port = "1234";
  c.set(HConstants.ZOOKEEPER_CLIENT_PORT, port);
  c.set("hbase.zookeeper.quorum", "example.com");
  assertEquals("example.com:" + port, parser.parse(c));
  // Multiple hosts: order is unspecified, so match with a regex.
  c.set("hbase.zookeeper.quorum", "example1.com,example2.com,example3.com");
  String ensemble = parser.parse(c);
  assertTrue(port, ensemble.matches("(example[1-3]\\.com:1234,){2}example[1-3]\\.com:" + port));
  // Explicit per-host ports are kept as-is.
  c.set("hbase.zookeeper.quorum", "example1.com:5678,example2.com:9012,example3.com:3456");
  ensemble = parser.parse(c);
  // Fixed: assertEquals takes (expected, actual); the original two calls
  // below had the arguments swapped.
  assertEquals("example1.com:5678,example2.com:9012,example3.com:3456", ensemble);
  // A host without a port picks up the configured client port.
  c.set("hbase.zookeeper.quorum", "example1.com:5678,example2.com:9012,example3.com");
  ensemble = parser.parse(c);
  assertEquals("example1.com:5678,example2.com:9012,example3.com:" + port, ensemble);
}

Class: org.apache.hadoop.hbase.zookeeper.TestZooKeeperNodeTracker

InternalCallVerifier BooleanVerifier 
/**
 * MasterAddressTracker.deleteIfEquals must remove the master znode only when
 * its content matches the given ServerName, and must be a harmless no-op when
 * the znode is already gone.
 */
@Test
public void testCleanZNode() throws Exception {
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "testNodeTracker",
      new TestZooKeeperNodeTracker.StubAbortable());
  final ServerName sn = ServerName.valueOf("127.0.0.1:52", 45L);
  ZKUtil.createAndFailSilent(zkw, TEST_UTIL.getConfiguration()
      .get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT));
  final String nodeName = zkw.getMasterAddressZNode();
  ZKUtil.createAndFailSilent(zkw, nodeName);
  // Freshly created (empty) znode does not match sn, so it must survive.
  // assertNotNull/assertNull replace the assertFalse(x == null)/
  // assertTrue(x == null) forms, which report far less usefully on failure.
  MasterAddressTracker.deleteIfEquals(zkw, sn.toString());
  Assert.assertNotNull(ZKUtil.getData(zkw, nodeName));
  // Content for a different server must not be deleted either.
  ZKUtil.setData(zkw, nodeName, MasterAddressTracker.toByteArray(sn, 0));
  MasterAddressTracker.deleteIfEquals(zkw, ServerName.valueOf("127.0.0.2:52", 45L).toString());
  Assert.assertNotNull(ZKUtil.getData(zkw, nodeName));
  // A matching server name deletes the node.
  ZKUtil.setData(zkw, nodeName, MasterAddressTracker.toByteArray(sn, 0));
  MasterAddressTracker.deleteIfEquals(zkw, sn.toString());
  Assert.assertNull(ZKUtil.getData(zkw, nodeName));
  // Deleting an already-absent node must not throw.
  MasterAddressTracker.deleteIfEquals(zkw, sn.toString());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * End-to-end exercise of ZooKeeperNodeTracker: creates/deletes/updates a
 * znode through an independent ZooKeeper connection and verifies that two
 * trackers on the test watcher, plus a thread blocked in
 * blockUntilAvailable, all observe each transition.
 */
@Test
public void testNodeTracker() throws Exception {
  Abortable abortable = new StubAbortable();
  ZooKeeperWatcher zk =
      new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "testNodeTracker", abortable);
  ZKUtil.createAndFailSilent(zk, zk.baseZNode);
  // Random node name so reruns don't collide with leftover state.
  final String node = ZKUtil.joinZNode(zk.baseZNode, new Long(rand.nextLong()).toString());
  final byte[] dataOne = Bytes.toBytes("dataOne");
  final byte[] dataTwo = Bytes.toBytes("dataTwo");
  // Tracker 1: started before the node exists, so it must report no data.
  TestTracker localTracker = new TestTracker(zk, node, abortable);
  localTracker.start();
  zk.registerListener(localTracker);
  assertNull(localTracker.getData(false));
  // A thread blocked in blockUntilAvailable must not have data yet either.
  WaitToGetDataThread thread = new WaitToGetDataThread(zk, node);
  thread.start();
  assertFalse(thread.hasData);
  // Tracker 2: a second independent tracker on the same node.
  TestTracker secondTracker = new TestTracker(zk, node, null);
  secondTracker.start();
  zk.registerListener(secondTracker);
  // Listener used purely to synchronize the test with ZK events.
  TestingZKListener zkListener = new TestingZKListener(zk, node);
  zk.registerListener(zkListener);
  assertEquals(0, zkListener.createdLock.availablePermits());
  // Separate raw ZK connection performs the mutations, so the trackers only
  // learn of them through watcher events.
  final ZooKeeper zkconn = new ZooKeeper(
      ZKConfig.getZKQuorumServersString(TEST_UTIL.getConfiguration()), 60000, new StubWatcher());
  // --- Phase 1: create node with dataOne; everyone should see it. ---
  zkconn.create(node, dataOne, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  zkListener.waitForCreation();
  thread.join();
  assertNotNull(localTracker.getData(false));
  assertNotNull(localTracker.blockUntilAvailable());
  assertTrue(Bytes.equals(localTracker.getData(false), dataOne));
  assertTrue(thread.hasData);
  assertTrue(Bytes.equals(thread.tracker.getData(false), dataOne));
  LOG.info("Successfully got data one");
  assertNotNull(secondTracker.getData(false));
  assertNotNull(secondTracker.blockUntilAvailable());
  assertTrue(Bytes.equals(secondTracker.getData(false), dataOne));
  LOG.info("Successfully got data one with the second tracker");
  // --- Phase 2: delete node; trackers must go back to "no data". ---
  zkconn.delete(node, -1);
  zkListener.waitForDeletion();
  // Re-use the waiting thread's tracker in a fresh blocked thread.
  TestTracker threadTracker = thread.tracker;
  thread = new WaitToGetDataThread(zk, node, threadTracker);
  thread.start();
  assertFalse(thread.hasData);
  assertNull(secondTracker.getData(false));
  assertNull(localTracker.getData(false));
  LOG.info("Successfully made unavailable");
  // --- Phase 3: recreate node with dataTwo; everyone should see dataTwo. ---
  zkconn.create(node, dataTwo, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  zkListener.waitForCreation();
  thread.join();
  assertNotNull(localTracker.getData(false));
  assertNotNull(localTracker.blockUntilAvailable());
  assertTrue(Bytes.equals(localTracker.getData(false), dataTwo));
  assertNotNull(secondTracker.getData(false));
  assertNotNull(secondTracker.blockUntilAvailable());
  assertTrue(Bytes.equals(secondTracker.getData(false), dataTwo));
  assertTrue(thread.hasData);
  assertTrue(Bytes.equals(thread.tracker.getData(false), dataTwo));
  LOG.info("Successfully got data two on all trackers and threads");
  // --- Phase 4: setData back to dataOne; everyone should see the change. ---
  zkconn.setData(node, dataOne, -1);
  zkListener.waitForDataChange();
  assertNotNull(localTracker.getData(false));
  assertNotNull(localTracker.blockUntilAvailable());
  assertTrue(Bytes.equals(localTracker.getData(false), dataOne));
  assertNotNull(secondTracker.getData(false));
  assertNotNull(secondTracker.blockUntilAvailable());
  assertTrue(Bytes.equals(secondTracker.getData(false), dataOne));
  assertTrue(thread.hasData);
  assertTrue(Bytes.equals(thread.tracker.getData(false), dataOne));
  LOG.info("Successfully got data one following a data change on all trackers and threads");
}

Class: org.apache.hadoop.hbase.zookeeper.TestZooKeeperWatcher

InternalCallVerifier BooleanVerifier 
/**
 * Checks ZooKeeperWatcher's classification of its well-known znodes into
 * client-readable and server-internal sets.
 */
@Test
public void testIsClientReadable() throws ZooKeeperConnectionException, IOException {
  final ZooKeeperWatcher w =
      new ZooKeeperWatcher(HBaseConfiguration.create(), "testIsClientReadable", null, false);
  // Znodes that clients must be able to read.
  assertTrue(w.isClientReadable(w.baseZNode));
  assertTrue(w.isClientReadable(w.getZNodeForReplica(0)));
  assertTrue(w.isClientReadable(w.getMasterAddressZNode()));
  assertTrue(w.isClientReadable(w.clusterIdZNode));
  assertTrue(w.isClientReadable(w.tableZNode));
  assertTrue(w.isClientReadable(ZKUtil.joinZNode(w.tableZNode, "foo")));
  assertTrue(w.isClientReadable(w.rsZNode));
  // Server-internal znodes that must not be client-readable.
  assertFalse(w.isClientReadable(w.tableLockZNode));
  assertFalse(w.isClientReadable(w.balancerZNode));
  assertFalse(w.isClientReadable(w.getRegionNormalizerZNode()));
  assertFalse(w.isClientReadable(w.clusterStateZNode));
  assertFalse(w.isClientReadable(w.drainingZNode));
  assertFalse(w.isClientReadable(w.recoveringRegionsZNode));
  assertFalse(w.isClientReadable(w.splitLogZNode));
  assertFalse(w.isClientReadable(w.backupMasterAddressesZNode));
  w.close();
}

Class: org.apache.hadoop.hbase.zookeeper.lock.TestZKInterProcessReadWriteLock

APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier 
/**
 * Two clients on separate ZK connections contend for the same table lock:
 * a held read lock must block a writer from the other client until released.
 */
@Test(timeout = 60000)
public void testMultipleClients() throws Exception {
  final String testName = "testMultipleClients";
  ZooKeeperWatcher zkWatcher1 = new ZooKeeperWatcher(conf, "testMultipleClients-1", null);
  ZooKeeperWatcher zkWatcher2 = new ZooKeeperWatcher(conf, "testMultipleClients-2", null);
  try {
    String znode = ZKUtil.joinZNode(zkWatcher1.tableLockZNode, testName);
    ZKInterProcessReadWriteLock clientLock1 =
        new ZKInterProcessReadWriteLock(zkWatcher1, znode, null);
    ZKInterProcessReadWriteLock clientLock2 =
        new ZKInterProcessReadWriteLock(zkWatcher2, znode, null);
    InterProcessLock lock1 = clientLock1.readLock(Bytes.toBytes("client1"));
    lock1.acquire();
    // While client1 holds the read lock, client2's write lock must not acquire.
    InterProcessLock lock2 = clientLock2.writeLock(Bytes.toBytes("client2"));
    assertFalse(lock2.tryAcquire(1000));
    // Once the read lock is released, the writer should get in promptly.
    lock1.release();
    assertTrue(lock2.tryAcquire(5000));
    lock2.release();
  } finally {
    // Close the watchers even when an assertion fails, so ZK sessions from a
    // failed run don't linger and interfere with subsequent tests.
    zkWatcher1.close();
    zkWatcher2.close();
  }
}